1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23 #include "coretypes.h"
27 #include "insn-config.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
39 /* This file is part of the graph coloring register allocator.
40 It deals with building the interference graph. When rebuilding
41 the graph for a function after spilling, we rebuild only those
42 parts needed, i.e. it works incrementally.
44 The first part (the functions called from build_web_parts_and_conflicts()
45 ) constructs a web_part for each pseudo register reference in the insn
46 stream, then goes backward from each use, until it reaches defs for that
47 pseudo. While going back it remembers seen defs for other registers as
48 conflicts. By connecting the uses and defs, which reach each other, webs
49 (or live ranges) are built conceptually.
51 The second part (make_webs() and children) deals with converting that
52 structure to the nodes and edges, on which our interference graph is
53 built. For each root web part constructed above, an instance of struct
54 web is created. For all subregs of pseudos, which matter for allocation,
55 a subweb of the corresponding super web is built. Finally all the
56 conflicts noted in the first part (as bitmaps) are transformed into real
59 As part of that process the webs are also classified (their spill cost
60 is calculated, and if they are spillable at all, and if not, for what
61 reason; or if they are rematerializable), and move insns are collected,
62 which are potentially coalescable.
64 The top-level entry of this file (build_i_graph) puts it all together,
65 and leaves us with a complete interference graph, which just has to
71 static unsigned HOST_WIDE_INT rtx_to_undefined (rtx);
72 static bitmap find_sub_conflicts (struct web_part *, unsigned int);
73 static bitmap get_sub_conflicts (struct web_part *, unsigned int);
74 static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *);
75 static bitmap undef_to_bitmap (struct web_part *,
76 unsigned HOST_WIDE_INT *);
77 static struct web_part * find_web_part_1 (struct web_part *);
78 static struct web_part * union_web_part_roots
79 (struct web_part *, struct web_part *);
80 static int defuse_overlap_p_1 (rtx, struct curr_use *);
81 static int live_out_1 (struct df *, struct curr_use *, rtx);
82 static int live_out (struct df *, struct curr_use *, rtx);
83 static rtx live_in_edge ( struct df *, struct curr_use *, edge);
84 static void live_in (struct df *, struct curr_use *, rtx);
85 static int copy_insn_p (rtx, rtx *, rtx *);
86 static void remember_move (rtx);
87 static void handle_asm_insn (struct df *, rtx);
88 static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode);
89 static void init_one_web_common (struct web *, rtx);
90 static void init_one_web (struct web *, rtx);
91 static void reinit_one_web (struct web *, rtx);
92 static struct web * add_subweb (struct web *, rtx);
93 static struct web * add_subweb_2 (struct web *, unsigned int);
94 static void init_web_parts (struct df *);
95 static void copy_conflict_list (struct web *);
96 static void add_conflict_edge (struct web *, struct web *);
97 static void build_inverse_webs (struct web *);
98 static void copy_web (struct web *, struct web_link **);
99 static void compare_and_free_webs (struct web_link **);
100 static void init_webs_defs_uses (void);
101 static unsigned int parts_to_webs_1 (struct df *, struct web_link **,
103 static void parts_to_webs (struct df *);
104 static void reset_conflicts (void);
106 static void check_conflict_numbers (void)
108 static void conflicts_between_webs (struct df *);
109 static void remember_web_was_spilled (struct web *);
110 static void detect_spill_temps (void);
111 static int contains_pseudo (rtx);
112 static int want_to_remat (rtx x);
113 static void detect_remat_webs (void);
114 static void determine_web_costs (void);
115 static void detect_webs_set_in_cond_jump (void);
116 static void make_webs (struct df *);
117 static void moves_to_webs (struct df *);
118 static void connect_rmw_web_parts (struct df *);
119 static void update_regnos_mentioned (void);
120 static void livethrough_conflicts_bb (basic_block);
121 static void init_bb_info (void);
122 static void free_bb_info (void);
123 static void build_web_parts_and_conflicts (struct df *);
126 /* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
128 static sbitmap live_over_abnormal;
130 /* To cache if we already saw a certain edge while analyzing one
131 use, we use a tick count incremented per use. */
132 static unsigned int visited_pass;
134 /* A sbitmap of UIDs of move insns, which we already analyzed. */
135 static sbitmap move_handled;
137 /* One such structure is allocated per insn, and traces for the currently
138 analyzed use, which web part belongs to it, and how many bytes of
139 it were still undefined when that insn was reached. */
143 unsigned HOST_WIDE_INT undefined;
145 /* Indexed by UID. */
146 static struct visit_trace *visit_trace;
148 /* Per basic block we have one such structure, used to speed up
149 the backtracing of uses. */
152 /* The value of visited_pass, as the first insn of this BB was reached
153 the last time. If this equals the current visited_pass, then
154 undefined is valid. Otherwise not. */
156 /* The still undefined bytes at that time. The use to which this is
157 relative is the current use. */
158 unsigned HOST_WIDE_INT undefined;
159 /* Bit regno is set, if that regno is mentioned in this BB as a def, or
160 the source of a copy insn. In these cases we can not skip the whole
161 block if we reach it from the end. */
162 bitmap regnos_mentioned;
163 /* If a use reaches the end of a BB, and that BB doesn't mention its
164 regno, we skip the block, and remember the ID of that use
165 as living throughout the whole block. */
166 bitmap live_throughout;
167 /* The content of the aux field before placing a pointer to this
172 /* We need a fast way to describe a certain part of a register.
173 Therefore we put together the size and offset (in bytes) in one
/* Encoding: the byte offset (begin) occupies the upper 16 bits, the
   byte length the lower 16 bits.  BYTE_BEGIN and BYTE_LENGTH are the
   matching accessors for a value packed with BL_TO_WORD.  */
175 #define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
176 #define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
177 #define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
179 /* For a REG or SUBREG expression X return the size/offset pair
185 unsigned int len, beg;
186 len = GET_MODE_SIZE (GET_MODE (x));
187 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
188 return BL_TO_WORD (beg, len);
191 /* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
193 static unsigned HOST_WIDE_INT
194 rtx_to_undefined (rtx x)
196 unsigned int len, beg;
197 unsigned HOST_WIDE_INT ret;
198 len = GET_MODE_SIZE (GET_MODE (x));
199 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
200 ret = ~ ((unsigned HOST_WIDE_INT) 0);
/* Build LEN one-bits (one bit per byte), shifted to start at bit BEG.
   NOTE(review): the trailing `return ret;' and closing brace appear to
   be missing from this excerpt — confirm against the full source.  */
201 ret = (~(ret << len)) << beg;
205 /* We remember if we've analyzed an insn for being a move insn, and if yes
206 between which operands. */
214 /* On demand cache, for if insns are copy insns, and if yes, what
215 source/target they have. */
216 static struct copy_p_cache * copy_cache;
220 /* For INSN, return nonzero, if it's a move insn, we consider to coalesce
221 later, and place the operands in *SOURCE and *TARGET, if they are
/* NOTE(review): this excerpt is visibly missing lines (the `static int'
   return type line, several returns and braces).  Compare against the
   complete file before editing.  Results are memoized in copy_cache[],
   keyed by INSN_UID: seen == 1 means "is a copy insn", seen == 2 means
   "seen, but not a copy insn".  */
225 copy_insn_p (rtx insn, rtx *source, rtx *target)
228 unsigned int d_regno, s_regno;
229 int uid = INSN_UID (insn);
231 gcc_assert (INSN_P (insn));
233 /* First look, if we already saw this insn. */
234 if (copy_cache[uid].seen)
236 /* And if we saw it, if it's actually a copy insn. */
237 if (copy_cache[uid].seen == 1)
240 *source = copy_cache[uid].source;
242 *target = copy_cache[uid].target;
248 /* Mark it as seen, but not being a copy insn. */
249 copy_cache[uid].seen = 2;
250 insn = single_set (insn);
256 /* We recognize moves between subreg's as copy insns. This is used to avoid
257 conflicts of those subwebs. But they are currently _not_ used for
258 coalescing (the check for this is in remember_move() below). */
259 while (GET_CODE (d) == STRICT_LOW_PART)
262 && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d))))
264 while (GET_CODE (s) == STRICT_LOW_PART)
267 && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s))))
270 s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
271 d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);
273 /* Copies between hardregs are useless for us, as not coalescable anyway. */
274 if ((s_regno < FIRST_PSEUDO_REGISTER
275 && d_regno < FIRST_PSEUDO_REGISTER)
276 || s_regno >= max_normal_pseudo
277 || d_regno >= max_normal_pseudo)
285 /* Still mark it as seen, but as a copy insn this time. */
286 copy_cache[uid].seen = 1;
287 copy_cache[uid].source = s;
288 copy_cache[uid].target = d;
292 /* We build webs, as we process the conflicts. For each use we go upward
293 the insn stream, noting any defs as potentially conflicting with the
294 current use. We stop at defs of the current regno. The conflicts are only
295 potential, because we may never reach a def, so this is an undefined use,
296 which conflicts with nothing. */
299 /* Given a web part WP, and the location of a reg part SIZE_WORD
300 return the conflict bitmap for that reg part, or NULL if it doesn't
/* Linear scan of WP's sub_conflicts list for a matching size_word tag.
   NOTE(review): the `return NULL;' fall-through appears to lie outside
   this excerpt.  */
304 find_sub_conflicts (struct web_part *wp, unsigned int size_word)
306 struct tagged_conflict *cl;
307 cl = wp->sub_conflicts;
308 for (; cl; cl = cl->next)
309 if (cl->size_word == size_word)
310 return cl->conflicts;
314 /* Similar to find_sub_conflicts(), but creates that bitmap, if it
315 doesn't exist. I.e. this never returns NULL. */
318 get_sub_conflicts (struct web_part *wp, unsigned int size_word)
320 bitmap b = find_sub_conflicts (wp, size_word);
/* Not found: allocate a new tagged entry and push it on the head of
   WP's sub_conflicts list.  */
323 struct tagged_conflict *cl = ra_alloc (sizeof *cl);
324 cl->conflicts = BITMAP_XMALLOC ();
325 cl->size_word = size_word;
326 cl->next = wp->sub_conflicts;
327 wp->sub_conflicts = cl;
333 /* Helper table for undef_to_size_word() below for small values
334 of UNDEFINED. Offsets and lengths are byte based. */
/* Indexed by the low four bits of *UNDEFINED (one bit per byte).  Each
   entry names the chunk of consecutive undefined bytes to peel off
   (size_word, as BL_TO_WORD (begin, length)) and the undefined-byte
   mask that remains afterwards (new_undef).  */
335 static struct undef_table_s {
336 unsigned int new_undef;
337 /* size | (byte << 16) */
338 unsigned int size_word;
339 } const undef_table [] = {
340 { 0, BL_TO_WORD (0, 0)}, /* 0 */
341 { 0, BL_TO_WORD (0, 1)},
342 { 0, BL_TO_WORD (1, 1)},
343 { 0, BL_TO_WORD (0, 2)},
344 { 0, BL_TO_WORD (2, 1)}, /* 4 */
345 { 1, BL_TO_WORD (2, 1)},
346 { 2, BL_TO_WORD (2, 1)},
347 { 3, BL_TO_WORD (2, 1)},
348 { 0, BL_TO_WORD (3, 1)}, /* 8 */
349 { 1, BL_TO_WORD (3, 1)},
350 { 2, BL_TO_WORD (3, 1)},
351 { 3, BL_TO_WORD (3, 1)},
352 { 0, BL_TO_WORD (2, 2)}, /* 12 */
353 { 1, BL_TO_WORD (2, 2)},
354 { 2, BL_TO_WORD (2, 2)},
355 { 0, BL_TO_WORD (0, 4)}};
357 /* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte.
358 A set bit means an undefined byte. Factor all undefined bytes into
359 groups, and return a size/ofs pair of consecutive undefined bytes,
360 but according to certain borders. Clear out those bits corresponding
361 to bytes overlaid by that size/ofs pair. REG is only used for
362 the mode, to detect if it's a floating mode or not.
364 For example: *UNDEFINED size+ofs new *UNDEFINED
/* NOTE(review): the `static unsigned int' return type line, several
   braces and some case bodies are missing from this excerpt.  */
374 undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined)
376 /* When only the lower four bits are possibly set, we use
377 a fast lookup table. */
378 if (*undefined <= 15)
380 struct undef_table_s u;
381 u = undef_table[*undefined];
382 *undefined = u.new_undef;
386 /* Otherwise we handle certain cases directly. */
387 if (*undefined <= 0xffff)
388 switch ((int) *undefined)
390 case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
391 case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
392 case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
393 case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
395 if (INTEGRAL_MODE_P (GET_MODE (reg)))
396 { *undefined = 0xff; return BL_TO_WORD (8, 4); }
398 { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ }
399 case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
400 case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
401 case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
402 case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
405 /* And if nothing matched fall back to the general solution. For
406 now unknown undefined bytes are converted to sequences of maximal
407 length 4 bytes. We could make this larger if necessary. */
409 unsigned HOST_WIDE_INT u = *undefined;
411 struct undef_table_s tab;
412 for (word = 0; (u & 15) == 0; word += 4)
415 tab = undef_table[u];
417 u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word)) | (u << word);
419 /* Size remains the same, only the begin is moved up by WORD bytes. */
420 return tab.size_word + BL_TO_WORD (word, 0);
424 /* Put the above three functions together. For a set of undefined bytes
425 as bitmap *UNDEFINED, look for (create if necessary) and return the
426 corresponding conflict bitmap. Change *UNDEFINED to remove the bytes
427 covered by the part for that bitmap. */
/* NOTE(review): the `static bitmap' return type line and the second
   argument of the undef_to_size_word() call (UNDEFINED) appear to be
   missing from this excerpt.  */
430 undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined)
432 unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
434 return get_sub_conflicts (wp, size_word);
437 /* Returns the root of the web part P is a member of. Additionally
438 it compresses the path. P may not be NULL. */
/* Classic union-find root lookup with path compression.
   NOTE(review): the first loop (walking R up to the root) and the loop
   body storing p->uplink appear to be missing from this excerpt.  */
440 static struct web_part *
441 find_web_part_1 (struct web_part *p)
443 struct web_part *r = p;
444 struct web_part *p_next;
447 for (; p != r; p = p_next)
455 /* Fast macro for the common case (WP either being the root itself, or
456 the end of an already compressed path).  Only falls back to the real
   path-compressing walk in find_web_part_1() for longer chains. */
458 #define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
459 : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
461 /* Unions together the parts R1 resp. R2 is a root of.
462 All dynamic information associated with the parts (number of spanned insns
463 and so on) is also merged.
464 The root of the resulting (possibly larger) web part is returned. */
/* NOTE(review): the swap of R1/R2 via H, the uplink assignment, and
   several braces/returns appear to be missing from this excerpt.  */
466 static struct web_part *
467 union_web_part_roots (struct web_part *r1, struct web_part *r2)
471 /* The new root is the smaller (pointerwise) of both. This is crucial
472 to make the construction of webs from web parts work (so, when
473 scanning all parts, we see the roots before all its children).
474 Additionally this ensures, that if the web has a def at all, then
475 the root is a def (because all def parts are before use parts in the
476 web_parts[] array), or put another way, as soon, as the root of a
477 web part is not a def, this is an uninitialized web part. The
478 way we construct the I-graph ensures, that if a web is initialized,
479 then the first part we find (besides trivial 1 item parts) has a
483 struct web_part *h = r1;
490 /* Now we merge the dynamic information of R1 and R2. */
491 r1->spanned_deaths += r2->spanned_deaths;
493 if (!r1->sub_conflicts)
494 r1->sub_conflicts = r2->sub_conflicts;
495 else if (r2->sub_conflicts)
496 /* We need to merge the conflict bitmaps from R2 into R1. */
498 struct tagged_conflict *cl1, *cl2;
499 /* First those from R2, which are also contained in R1.
500 We union the bitmaps, and free those from R2, resetting them
502 for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
503 for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
504 if (cl1->size_word == cl2->size_word)
506 bitmap_operation (cl1->conflicts, cl1->conflicts,
507 cl2->conflicts, BITMAP_IOR);
508 BITMAP_XFREE (cl2->conflicts);
509 cl2->conflicts = NULL;
511 /* Now the conflict lists from R2 which weren't in R1.
512 We simply copy the entries from R2 into R1's list. */
513 for (cl2 = r2->sub_conflicts; cl2;)
515 struct tagged_conflict *cl_next = cl2->next;
518 cl2->next = r1->sub_conflicts;
519 r1->sub_conflicts = cl2;
524 r2->sub_conflicts = NULL;
525 r1->crosses_call |= r2->crosses_call;
530 /* Convenience macro, that is capable of unioning also non-roots. */
/* NOTE: P1 and P2 are evaluated more than once — pass side-effect-free
   expressions only.  */
531 #define union_web_parts(p1, p2) \
532 ((p1 == p2) ? find_web_part (p1) \
533 : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
535 /* Remember that we've handled a given move, so we don't reprocess it. */
/* NOTE(review): the `static void' line, the declarations of s/d/ret,
   and the tail building the move_list entry appear to be missing from
   this excerpt.  */
538 remember_move (rtx insn)
540 if (!TEST_BIT (move_handled, INSN_UID (insn)))
544 struct df_link *slink = DF_INSN_USES (df, insn);
545 struct df_link *link = DF_INSN_DEFS (df, insn);
547 SET_BIT (move_handled, INSN_UID (insn));
548 ret = copy_insn_p (insn, &s, &d);
551 /* Some sanity test for the copy insn. */
552 gcc_assert (link && link->ref);
553 gcc_assert (slink && slink->ref);
554 /* The following (link->next != 0) happens when a hardreg
555 is used in wider mode (REG:DI %eax). Then df.* creates
556 a def/use for each hardreg contained therein. We only
557 allow hardregs here. */
558 gcc_assert (!link->next
559 || DF_REF_REGNO (link->next->ref)
560 < FIRST_PSEUDO_REGISTER);
562 /* XXX for now we don't remember move insns involving any subregs.
563 Those would be difficult to coalesce (we would need to implement
564 handling of all the subwebs in the allocator, including that such
565 subwebs could be source and target of coalescing). */
566 if (REG_P (s) && REG_P (d))
568 struct move *m = ra_calloc (sizeof (struct move));
569 struct move_list *ml;
571 ml = ra_alloc (sizeof (struct move_list));
579 /* This describes the USE currently looked at in the main-loop in
580 build_web_parts_and_conflicts(). */
/* NOTE(review): the `struct curr_use {' header line and some members
   (presumably wp, x and regno, judging by uses elsewhere in this file)
   appear to be missing from this excerpt — confirm against the full
   source.  */
583 /* This has a 1-bit for each byte in the USE, which is still undefined. */
584 unsigned HOST_WIDE_INT undefined;
585 /* For easy access. */
588 /* If some bits of this USE are live over an abnormal edge. */
589 unsigned int live_over_abnormal;
592 /* Returns nonzero iff rtx DEF and USE have bits in common (but see below).
593 It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a)
594 x) rtx's. Furthermore if it's a subreg rtx M1 is at least one word wide,
595 and a is a multi-word pseudo. If DEF or USE are hardregs, they are in
596 word_mode, so we don't need to check for further hardregs which would result
597 from wider references. We are never called with paradoxical subregs.
600 0 for no common bits,
601 1 if DEF and USE exactly cover the same bytes,
602 2 if the DEF only covers a part of the bits of USE
603 3 if the DEF covers more than the bits of the USE, and
604 4 if both are SUBREG's of different size, but have bytes in common.
605 -1 is a special case, for when DEF and USE refer to the same regno, but
606 have for other reasons no bits in common (can only happen with
607 subregs referring to different words, or to words which already were
608 defined for this USE).
609 Furthermore it modifies use->undefined to clear the bits which get defined
610 by DEF (only for cases with partial overlap).
611 I.e. if bit 1 is set for the result != -1, the USE was completely covered,
612 otherwise a test is needed to track the already defined bytes. */
/* NOTE(review): the `static int' line, the switch head dispatching on
   the REG/SUBREG combination, several `return' statements and braces
   appear to be missing from this excerpt.  */
615 defuse_overlap_p_1 (rtx def, struct curr_use *use)
622 if (GET_CODE (def) == SUBREG)
624 if (REGNO (SUBREG_REG (def)) != use->regno)
628 else if (REGNO (def) != use->regno)
630 if (GET_CODE (use->x) == SUBREG)
634 case 0: /* REG, REG */
636 case 1: /* SUBREG, REG */
638 unsigned HOST_WIDE_INT old_u = use->undefined;
639 use->undefined &= ~ rtx_to_undefined (def);
640 return (old_u != use->undefined) ? 2 : -1;
642 case 2: /* REG, SUBREG */
644 case 3: /* SUBREG, SUBREG */
645 if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
646 /* If the size of both things is the same, the subreg's overlap
647 if they refer to the same word. */
648 if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
650 /* Now the more difficult part: the same regno is referred, but the
651 sizes of the references or the words differ. E.g.
652 (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
653 overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
/* Compare the byte intervals [b1,e1] and [b2,e2] of DEF and USE;
   disjoint intervals mean no overlap despite the shared regno.  */
656 unsigned HOST_WIDE_INT old_u;
658 unsigned int bl1, bl2;
659 bl1 = rtx_to_bits (def);
660 bl2 = rtx_to_bits (use->x);
661 b1 = BYTE_BEGIN (bl1);
662 b2 = BYTE_BEGIN (bl2);
663 e1 = b1 + BYTE_LENGTH (bl1) - 1;
664 e2 = b2 + BYTE_LENGTH (bl2) - 1;
665 if (b1 > e2 || b2 > e1)
667 old_u = use->undefined;
668 use->undefined &= ~ rtx_to_undefined (def);
669 return (old_u != use->undefined) ? 4 : -1;
676 /* Macro for the common case of either def and use having the same rtx,
677 or based on different regnos.
   NOTE: DEF is evaluated more than once — pass side-effect-free
   expressions only. */
678 #define defuse_overlap_p(def, use) \
679 ((def) == (use)->x ? 1 : \
680 (REGNO (GET_CODE (def) == SUBREG \
681 ? SUBREG_REG (def) : def) != use->regno \
682 ? 0 : defuse_overlap_p_1 (def, use)))
685 /* The use USE flows into INSN (backwards). Determine INSNs effect on it,
686 and return nonzero, if (parts of) that USE are also live before it.
687 This also notes conflicts between the USE and all DEFS in that insn,
688 and modifies the undefined bits of USE in case parts of it were set in
/* NOTE(review): this excerpt is missing many lines of the original
   function (the `static int' line, the INSN_P/CALL_P tests, several
   braces, `break'/`return' statements and the defined variable).
   Compare against the complete file before editing.  */
692 live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
695 int uid = INSN_UID (insn);
696 struct web_part *wp = use->wp;
698 /* Mark, that this insn needs this webpart live. */
699 visit_trace[uid].wp = wp;
700 visit_trace[uid].undefined = use->undefined;
704 unsigned int source_regno = ~0;
705 unsigned int regno = use->regno;
706 unsigned HOST_WIDE_INT orig_undef = use->undefined;
707 unsigned HOST_WIDE_INT final_undef = use->undefined;
709 unsigned int n, num_defs = insn_df[uid].num_defs;
710 struct ref **defs = insn_df[uid].defs;
712 /* We want to access the root webpart. */
713 wp = find_web_part (wp);
715 wp->crosses_call = 1;
716 else if (copy_insn_p (insn, &s, NULL))
717 source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
719 /* Look at all DEFS in this insn. */
720 for (n = 0; n < num_defs; n++)
722 struct ref *ref = defs[n];
725 /* Reset the undefined bits for each iteration, in case this
726 insn has more than one set, and one of them sets this regno.
727 But still the original undefined part conflicts with the other
729 use->undefined = orig_undef;
730 if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
733 /* Same regnos but non-overlapping or already defined bits,
734 so ignore this DEF, or better said make the yet undefined
735 part and this DEF conflicting. */
737 unsigned HOST_WIDE_INT undef;
738 undef = use->undefined;
740 bitmap_set_bit (undef_to_bitmap (wp, &undef),
745 /* The current DEF completely covers the USE, so we can
746 stop traversing the code looking for further DEFs. */
749 /* We have a partial overlap. */
751 final_undef &= use->undefined;
752 if (final_undef == 0)
753 /* Now the USE is completely defined, which means, that
754 we can stop looking for former DEFs. */
756 /* If this is a partial overlap, which left some bits
757 in USE undefined, we normally would need to create
758 conflicts between that undefined part and the part of
759 this DEF which overlapped with some of the formerly
760 undefined bits. We don't need to do this, because both
761 parts of this DEF (that which overlaps, and that which
762 doesn't) are written together in this one DEF, and can
763 not be colored in a way which would conflict with
764 the USE. This is only true for partial overlap,
765 because only then the DEF and USE have bits in common,
766 which makes the DEF move, if the USE moves, making them
768 If they have no bits in common (lap == -1), they are
769 really independent. Therefore we there made a
772 /* This is at least a partial overlap, so we need to union
774 wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
778 /* The DEF and the USE don't overlap at all, different
779 regnos. I.e. make conflicts between the undefined bits,
781 unsigned HOST_WIDE_INT undef = use->undefined;
783 if (regno == source_regno)
784 /* This triggers only, when this was a copy insn and the
785 source is at least a part of the USE currently looked at.
786 In this case only the bits of the USE conflict with the
787 DEF, which are not covered by the source of this copy
788 insn, and which are still undefined. I.e. in the best
789 case (the whole reg being the source), _no_ conflicts
790 between that USE and this DEF (the target of the move)
791 are created by this insn (though they might be by
792 others). This is a super case of the normal copy insn
793 only between full regs. */
795 undef &= ~ rtx_to_undefined (s);
799 /*struct web_part *cwp;
800 cwp = find_web_part (&web_parts[DF_REF_ID
803 /* TODO: somehow instead of noting the ID of the LINK
804 use an ID nearer to the root webpart of that LINK.
805 We can't use the root itself, because we later use the
806 ID to look at the form (reg or subreg, and if yes,
807 which subreg) of this conflict. This means, that we
808 need to remember in the root an ID for each form, and
809 maintaining this, when merging web parts. This makes
810 the bitmaps smaller. */
812 bitmap_set_bit (undef_to_bitmap (wp, &undef),
822 /* If this insn doesn't completely define the USE, increment also
823 its spanned deaths count (if this insn contains a death). */
824 gcc_assert (uid < death_insns_max_uid);
825 if (TEST_BIT (insns_with_deaths, uid))
826 wp->spanned_deaths++;
827 use->undefined = final_undef;
834 /* Same as live_out_1() (actually calls it), but caches some information.
835 E.g. if we reached this INSN with the current regno already, and the
836 current undefined bits are a subset of those as we came here, we
837 simply connect the web parts of the USE, and the one cached for this
838 INSN, and additionally return zero, indicating we don't need to traverse
839 this path any longer (all effect were already seen, as we first reached
/* NOTE(review): the `static int' line, a `return 0;' in the cached
   branch, and closing braces appear to be missing from this excerpt.  */
843 live_out (struct df *df, struct curr_use *use, rtx insn)
845 unsigned int uid = INSN_UID (insn);
846 if (visit_trace[uid].wp
847 && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno
848 && (use->undefined & ~visit_trace[uid].undefined) == 0)
850 union_web_parts (visit_trace[uid].wp, use->wp);
851 /* Don't search any further, as we already were here with this regno. */
855 return live_out_1 (df, use, insn);
858 /* The current USE reached a basic block head. The edge E is one
859 of the predecessors edges. This evaluates the effect of the predecessor
860 block onto the USE, and returns the next insn, which should be looked at.
861 This either is the last insn of that pred. block, or the first one.
862 The latter happens, when the pred. block has no possible effect on the
863 USE, except for conflicts. In that case, it's remembered, that the USE
864 is live over that whole block, and it's skipped. Otherwise we simply
865 continue with the last insn of the block.
867 This also determines the effects of abnormal edges, and remembers
868 which uses are live at the end of that basic block. */
/* NOTE(review): the `static rtx' line, the next_insn declaration, a
   `return NULL_RTX;' for the ignored-EH case, and the final return
   appear to be missing from this excerpt.  */
871 live_in_edge (struct df *df, struct curr_use *use, edge e)
873 struct ra_bb_info *info_pred;
875 /* Call used hard regs die over an exception edge, ergo
876 they don't reach the predecessor block, so ignore such
877 uses. And also don't set the live_over_abnormal flag
879 if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
880 && call_used_regs[use->regno])
882 if (e->flags & EDGE_ABNORMAL)
883 use->live_over_abnormal = 1;
884 bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
885 info_pred = (struct ra_bb_info *) e->src->aux;
886 next_insn = BB_END (e->src);
888 /* If the last insn of the pred. block doesn't completely define the
889 current use, we need to check the block. */
890 if (live_out (df, use, next_insn))
892 /* If the current regno isn't mentioned anywhere in the whole block,
893 and the complete use is still undefined... */
894 if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
895 && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
897 /* ...we can hop over the whole block and defer conflict
898 creation to later. */
899 bitmap_set_bit (info_pred->live_throughout,
900 DF_REF_ID (use->wp->ref))
901 next_insn = BB_HEAD (e->src);
909 /* USE flows into the end of the insns preceding INSN. Determine
910 their effects (in live_out()) and possibly loop over the preceding INSN,
911 or call itself recursively on a basic block border. When a toplevel
912 call of this function returns the USE is completely analyzed. I.e.
913 its def-use chain (at least) is built, possibly connected with other
914 def-use chains, and all defs during that chain are noted. */
/* NOTE(review): the `static void' line, the outer `while (1)' loop,
   the edge variable declaration, and several `return's/braces appear
   to be missing from this excerpt.  */
917 live_in (struct df *df, struct curr_use *use, rtx insn)
919 unsigned int loc_vpass = visited_pass;
921 /* Note, that, even _if_ we are called with use->wp a root-part, this might
922 become non-root in the for() loop below (due to live_out() unioning
923 it). So beware, not to change use->wp in a way, for which only root-webs
927 int uid = INSN_UID (insn);
928 basic_block bb = BLOCK_FOR_INSN (insn);
931 /* We want to be as fast as possible, so explicitly write
933 for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
934 insn = PREV_INSN (insn))
938 if (bb != BLOCK_FOR_INSN (insn))
941 unsigned HOST_WIDE_INT undef = use->undefined;
942 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
943 if ((e = bb->pred) == NULL)
945 /* We now check, if we already traversed the predecessors of this
946 block for the current pass and the current set of undefined
947 bits. If yes, we don't need to check the predecessors again.
948 So, conceptually this information is tagged to the first
949 insn of a basic block. */
950 if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
952 info->pass = loc_vpass;
953 info->undefined = undef;
954 /* All but the last predecessor are handled recursively. */
955 for (; e->pred_next; e = e->pred_next)
957 insn = live_in_edge (df, use, e);
959 live_in (df, use, insn);
960 use->undefined = undef;
962 insn = live_in_edge (df, use, e);
966 else if (!live_out (df, use, insn))
971 /* Determine all regnos which are mentioned in a basic block, in an
972 interesting way. Interesting here means either in a def, or as the
973 source of a move insn. We only look at insns added since the last
/* NOTE(review): the `static void' line, the insn/bb/source variable
   declarations, and several braces/continues appear to be missing from
   this excerpt.  */
977 update_regnos_mentioned (void)
979 int last_uid = last_max_uid;
982 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
985 /* Don't look at old insns. */
986 if (INSN_UID (insn) < last_uid)
988 /* XXX We should also remember moves over iterations (we already
989 save the cache, but not the movelist). */
990 if (copy_insn_p (insn, NULL, NULL))
991 remember_move (insn);
993 else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
996 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
997 bitmap mentioned = info->regnos_mentioned;
998 struct df_link *link;
999 if (copy_insn_p (insn, &source, NULL))
1001 remember_move (insn);
1002 bitmap_set_bit (mentioned,
1003 REGNO (GET_CODE (source) == SUBREG
1004 ? SUBREG_REG (source) : source));
1006 for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
1008 bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
1013 /* Handle the uses which reach a block end, but were deferred due
1014 to its regno not being mentioned in that block. This adds the
1015 remaining conflicts and updates also the crosses_call and
1016 spanned_deaths members. */
/* NOTE(review): the `static void' line, declarations of first/use_id/
   all_defs/insn/conflicts, the CALL_P test setting contains_call, and
   several braces appear to be missing from this excerpt.  */
1019 livethrough_conflicts_bb (basic_block bb)
1021 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1025 unsigned int deaths = 0;
1026 unsigned int contains_call = 0;
1028 /* If there are no deferred uses, just return. */
1029 if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
1032 /* First collect the IDs of all defs, count the number of death
1033 containing insns, and if there's some call_insn here. */
1034 all_defs = BITMAP_XMALLOC ();
1035 for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
1040 struct ra_insn_info info;
1042 info = insn_df[INSN_UID (insn)];
1043 for (n = 0; n < info.num_defs; n++)
1044 bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
1045 if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
1050 if (insn == BB_END (bb))
1054 /* And now, if we have found anything, make all live_through
1055 uses conflict with all defs, and update their other members. */
1058 || bitmap_first_set_bit (all_defs) >= 0)
1062 EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id, bi)
1064 struct web_part *wp = &web_parts[df->def_id + use_id];
1065 unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
1067 wp = find_web_part (wp);
1068 wp->spanned_deaths += deaths;
1069 wp->crosses_call |= contains_call;
1070 conflicts = get_sub_conflicts (wp, bl);
1071 bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
1075 BITMAP_XFREE (all_defs);
1078 /* Allocate the per basic block info for traversing the insn stream for
1079 building live ranges. */
/* NOTE(review): the function signature and surrounding loop (presumably a
   FOR_ALL_BB over the CFG) are not visible in this view.  The visible body
   allocates a zeroed ra_bb_info, gives it fresh bitmaps, and chains the
   previous bb->aux value through old_aux so it can be restored later. */
1087 struct ra_bb_info *info = xcalloc (1, sizeof *info);
1088 info->regnos_mentioned = BITMAP_XMALLOC ();
1089 info->live_throughout = BITMAP_XMALLOC ();
1090 info->old_aux = bb->aux;
1091 bb->aux = (void *) info;
1095 /* Free that per basic block info. */
/* NOTE(review): signature/loop not visible here.  Mirror image of the
   allocator above: free both bitmaps and restore the saved bb->aux.
   (The free of INFO itself is among the lines not shown.) */
1103 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1104 BITMAP_XFREE (info->regnos_mentioned);
1105 BITMAP_XFREE (info->live_throughout);
1106 bb->aux = info->old_aux;
1111 /* Toplevel function for the first part of this file.
1112 Connect web parts, thereby implicitly building webs, and remember
1116 build_web_parts_and_conflicts (struct df *df)
1118 struct df_link *link;
1119 struct curr_use use;
/* Scratch arrays indexed by insn UID, used by the live_in/live_out
   walkers; freed after the traversal (free not visible in this view). */
1122 number_seen = xcalloc (get_max_uid (), sizeof (int));
1123 visit_trace = xcalloc (get_max_uid (), sizeof (visit_trace[0]));
1124 update_regnos_mentioned ();
1126 /* Here's the main loop.
1127 It goes through all insn's, connects web parts along the way, notes
1128 conflicts between webparts, and remembers move instructions. */
1130 for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++)
1131 if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
1132 for (link = df->regs[use.regno].uses; link; link = link->next)
1135 struct ref *ref = link->ref;
1136 rtx insn = DF_REF_INSN (ref);
1137 /* Only recheck marked or new uses, or uses from hardregs. */
1138 if (use.regno >= FIRST_PSEUDO_REGISTER
1139 && DF_REF_ID (ref) < last_use_id
1140 && !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
/* (The `continue' controlled by this condition is not visible.) */
1142 use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
1143 use.x = DF_REF_REG (ref);
1144 use.live_over_abnormal = 0;
1145 use.undefined = rtx_to_undefined (use.x);
/* Walk backward from the use until its defs are reached, unioning web
   parts and recording conflicts along the way. */
1147 live_in (df, &use, insn);
1148 if (use.live_over_abnormal)
1149 SET_BIT (live_over_abnormal, DF_REF_ID (ref));
1152 dump_number_seen ();
/* Post-pass per basic block: resolve the deferred live-through uses and
   reset the per-BB bookkeeping for the next iteration. */
1155 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1156 livethrough_conflicts_bb (bb);
1157 bitmap_zero (info->live_throughout);
1164 /* Here we look per insn, for DF references being in uses _and_ defs.
1165 This means, in the RTL a (REG xx) expression was seen as a
1166 read/modify/write, as happens for (set (subreg:SI (reg:DI xx)) (...))
1167 e.g. Our code has created two webs for this, as it should. Unfortunately,
1168 as the REG reference is only one time in the RTL we can't color
1169 both webs different (arguably this also would be wrong for a real
1170 read-mod-write instruction), so we must reconnect such webs. */
1173 connect_rmw_web_parts (struct df *df)
1177 for (i = 0; i < df->use_id; i++)
1179 struct web_part *wp1 = &web_parts[df->def_id + i];
1181 struct df_link *link;
1184 /* If it's an uninitialized web, we don't want to connect it to others,
1185 as the read cycle in read-mod-write had probably no effect. */
/* A use whose root lies in the use range (>= def_id) never reached a def,
   i.e. it is uninitialized — skip it (the `continue' is not visible). */
1186 if (find_web_part (wp1) >= &web_parts[df->def_id])
1188 reg = DF_REF_REAL_REG (wp1->ref);
1189 link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
/* Union this use with every def of the same underlying REG in the same
   insn — that is exactly the read-mod-write situation described above. */
1190 for (; link; link = link->next)
1191 if (reg == DF_REF_REAL_REG (link->ref))
1193 struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
1194 union_web_parts (wp1, wp2);
1199 /* Deletes all hardregs from *S which are not allowed for MODE. */
1202 prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
/* Intersect with the precomputed per-mode usable set. */
1204 AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
1207 /* Initialize the members of a web, which are deducible from REG. */
1210 init_one_web_common (struct web *web, rtx reg)
1212 gcc_assert (REG_P (reg));
1213 /* web->id isn't initialized here. */
1214 web->regno = REGNO (reg);
1218 web->dlink = ra_calloc (sizeof (struct dlist));
1219 DLIST_WEB (web->dlink) = web;
1222 the former (superunion) doesn't constrain the graph enough. E.g.
1223 on x86 QImode _requires_ QI_REGS, but as alternate class usually
1224 GENERAL_REGS is given. So the graph is not constrained enough,
1225 thinking it has more freedom then it really has, which leads
1226 to repeated spill tryings. OTOH the latter (only using preferred
1227 class) is too constrained, as normally (e.g. with all SImode
1228 pseudos), they can be allocated also in the alternate class.
1229 What we really want, are the _exact_ hard regs allowed, not
1230 just a class. Later. */
1231 /*web->regclass = reg_class_superunion
1232 [reg_preferred_class (web->regno)]
1233 [reg_alternate_class (web->regno)];*/
1234 /*web->regclass = reg_preferred_class (web->regno);*/
1235 web->regclass = reg_class_subunion
1236 [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
/* NOTE(review): the next line immediately overwrites the subunion computed
   just above, so regclass ends up as only the preferred class while
   usable_regs (below) still includes the alternate class.  Looks like
   experimental leftover — confirm intent before "fixing". */
1237 web->regclass = reg_preferred_class (web->regno);
1238 if (web->regno < FIRST_PSEUDO_REGISTER)
/* Hard register: it is precolored with itself, conflicts are irrelevant
   (UINT_MAX sentinel) and exactly one hardreg is usable. */
1240 web->color = web->regno;
1241 put_web (web, PRECOLORED);
1242 web->num_conflicts = UINT_MAX;
1243 web->add_hardregs = 0;
1244 CLEAR_HARD_REG_SET (web->usable_regs);
1245 SET_HARD_REG_BIT (web->usable_regs, web->regno);
1246 web->num_freedom = 1;
1250 HARD_REG_SET alternate;
1252 put_web (web, INITIAL);
1253 /* add_hardregs is wrong in multi-length classes, e.g.
1254 using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS,
1255 where, if it finally is allocated to GENERAL_REGS it needs two,
1256 if allocated to FLOAT_REGS only one hardreg. XXX */
1258 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
/* NOTE(review): `0 *' makes this always 0; the real count is built up
   later as conflict edges are added — presumably intentional. */
1259 web->num_conflicts = 0 * web->add_hardregs;
/* usable_regs = preferred ∪ alternate, minus forbidden colors, pruned to
   regs valid for the pseudo's mode (and mode-change-safe regs, if the
   target defines CANNOT_CHANGE_MODE_CLASS). */
1260 COPY_HARD_REG_SET (web->usable_regs,
1261 reg_class_contents[reg_preferred_class (web->regno)]);
1262 COPY_HARD_REG_SET (alternate,
1263 reg_class_contents[reg_alternate_class (web->regno)]);
1264 IOR_HARD_REG_SET (web->usable_regs, alternate);
1265 /*IOR_HARD_REG_SET (web->usable_regs,
1266 reg_class_contents[reg_alternate_class
1268 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
1269 prune_hardregs_for_mode (&web->usable_regs,
1270 PSEUDO_REGNO_MODE (web->regno));
1271 #ifdef CANNOT_CHANGE_MODE_CLASS
1272 if (web->mode_changed)
1273 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
1275 web->num_freedom = hard_regs_count (web->usable_regs);
1276 web->num_freedom -= web->add_hardregs;
/* A web with no candidate hardregs at all would be uncolorable from the
   start — that must not happen. */
1277 gcc_assert (web->num_freedom);
1279 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
1282 /* Initializes WEBs members from REG or zero them. */
1285 init_one_web (struct web *web, rtx reg)
/* Fresh web: zero everything, then fill in the REG-derived members and
   allocate the useless-conflicts bitmap. */
1287 memset (web, 0, sizeof (struct web));
1288 init_one_web_common (web, reg);
1289 web->useless_conflicts = BITMAP_XMALLOC ();
1292 /* WEB is an old web, meaning it came from the last pass, and got a
1293 color. We want to remember some of it's info, so zero only some
1297 reinit_one_web (struct web *web, rtx reg)
/* Remember the previous color (+1 so 0 means "had none"). */
1299 web->old_color = web->color + 1;
1300 init_one_web_common (web, reg);
/* Reset the per-pass state; members not listed here keep their values
   from the previous round. */
1301 web->span_deaths = 0;
1302 web->spill_temp = 0;
1303 web->orig_spill_temp = 0;
1304 web->use_my_regs = 0;
1305 web->spill_cost = 0;
1306 web->was_spilled = 0;
1307 web->is_coalesced = 0;
1308 web->artificial = 0;
1309 web->live_over_abnormal = 0;
1310 web->mode_changed = 0;
1311 web->subreg_stripped = 0;
1312 web->move_related = 0;
1314 web->target_of_spilled_move = 0;
1315 web->num_aliased = 0;
1316 if (web->type == PRECOLORED)
/* (The PRECOLORED-specific resets are among the lines not visible.) */
1320 web->orig_spill_cost = 0;
1322 CLEAR_HARD_REG_SET (web->bias_colors);
1323 CLEAR_HARD_REG_SET (web->prefer_colors);
1324 web->reg_rtx = NULL;
1325 web->stack_slot = NULL;
1326 web->pattern = NULL;
/* An old web must not carry moves into the new round, and its
   useless_conflicts bitmap must survive reinitialization. */
1328 gcc_assert (!web->moves);
1329 gcc_assert (web->useless_conflicts);
1332 /* Insert and returns a subweb corresponding to REG into WEB (which
1333 becomes its super web). It must not exist already. */
1336 add_subweb (struct web *web, rtx reg)
1339 gcc_assert (GET_CODE (reg) == SUBREG);
1340 w = xmalloc (sizeof (struct web));
1341 /* Copy most content from parent-web. */
1343 /* And initialize the private stuff. */
/* add_hardregs for the subreg's (possibly narrower) mode; the `0 *'
   leaves num_conflicts at 0 — counted up later. */
1345 w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
1346 w->num_conflicts = 0 * w->add_hardregs;
/* Link the new subweb at the head of the parent's subreg chain. */
1350 w->parent_web = web;
1351 w->subreg_next = web->subreg_next;
1352 web->subreg_next = w;
1356 /* Similar to add_subweb(), but instead of relying on a given SUBREG,
1357 we have just a size and an offset of the subpart of the REG rtx.
1358 In difference to add_subweb() this marks the new subweb as artificial. */
1361 add_subweb_2 (struct web *web, unsigned int size_word)
1363 /* To get a correct mode for the to be produced subreg, we don't want to
1364 simply do a mode_for_size() for the mode_class of the whole web.
1365 Suppose we deal with a CDImode web, but search for a 8 byte part.
1366 Now mode_for_size() would only search in the class MODE_COMPLEX_INT
1367 and would find CSImode which probably is not what we want. Instead
1368 we want DImode, which is in a completely other class. For this to work
1369 we instead first search the already existing subwebs, and take
1370 _their_ modeclasses as base for a search for ourself. */
1371 rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
1372 unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
1373 enum machine_mode mode;
1374 mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
/* Fall back to a plain integer mode if no mode of that class fits. */
1375 if (mode == BLKmode)
1376 mode = mode_for_size (size, MODE_INT, 0);
1377 gcc_assert (mode != BLKmode);
/* Build the synthetic SUBREG and register it; mark it artificial so it
   is not mistaken for a subreg that appears in the function's RTL. */
1378 web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
1379 BYTE_BEGIN (size_word)));
1380 web->artificial = 1;
1384 /* Initialize all the web parts we are going to need. */
1387 init_web_parts (struct df *df)
/* Defs occupy web_parts[0 .. def_id), uses web_parts[def_id ..). */
1392 for (no = 0; no < df->def_id; no++)
/* Old entries must still point at the same ref; new entries get theirs. */
1396 gcc_assert (no >= last_def_id || web_parts[no].ref == df->defs[no]);
1397 web_parts[no].ref = df->defs[no];
1398 /* Uplink might be set from the last iteration. */
1399 if (!web_parts[no].uplink)
1403 /* The last iteration might have left .ref set, while df_analyze()
1404 removed that ref (due to a removed copy insn) from the df->defs[]
1405 array. As we don't check for that in realloc_web_parts()
1407 web_parts[no].ref = NULL;
1409 for (no = 0; no < df->use_id; no++)
1413 gcc_assert (no >= last_use_id
1414 || web_parts[no + df->def_id].ref == df->uses[no]);
1415 web_parts[no + df->def_id].ref = df->uses[no];
1416 if (!web_parts[no + df->def_id].uplink)
1420 web_parts[no + df->def_id].ref = NULL;
1423 /* We want to have only one web for each precolored register. */
1424 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1426 struct web_part *r1 = NULL;
1427 struct df_link *link;
1428 /* Here once was a test, if there is any DEF at all, and only then to
1429 merge all the parts. This was incorrect, we really also want to have
1430 only one web-part for hardregs, even if there is no explicit DEF. */
1431 /* Link together all defs... */
1432 for (link = df->regs[regno].defs; link; link = link->next)
1435 struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
/* (The first-iteration handling that seeds r1 is not visible here.) */
1439 r1 = union_web_parts (r1, r2);
1441 /* ... and all uses. */
1442 for (link = df->regs[regno].uses; link; link = link->next)
1445 struct web_part *r2 = &web_parts[df->def_id
1446 + DF_REF_ID (link->ref)];
1450 r1 = union_web_parts (r1, r2);
1455 /* In case we want to remember the conflict list of a WEB, before adding
1456 new conflicts, we copy it here to orig_conflict_list. */
1459 copy_conflict_list (struct web *web)
1461 struct conflict_link *cl;
/* Must only be called once per web and pass. */
1462 gcc_assert (!web->orig_conflict_list);
1463 gcc_assert (!web->have_orig_conflicts);
1464 web->have_orig_conflicts = 1;
/* Deep-copy the list, including each entry's sub-conflict list.  The
   copies are pushed at the head, so orig_conflict_list ends up in
   reverse order of conflict_list — presumably harmless. */
1465 for (cl = web->conflict_list; cl; cl = cl->next)
1467 struct conflict_link *ncl;
1468 ncl = ra_alloc (sizeof *ncl);
1471 ncl->next = web->orig_conflict_list;
1472 web->orig_conflict_list = ncl;
1475 struct sub_conflict *sl, *nsl;
1476 for (sl = cl->sub; sl; sl = sl->next)
1478 nsl = ra_alloc (sizeof *nsl);
1481 nsl->next = ncl->sub;
1488 /* Possibly add an edge from web FROM to TO marking a conflict between
1489 those two. This is one half of marking a complete conflict, which notes
1490 in FROM, that TO is a conflict. Adding TO to FROM's conflicts might
1491 make other conflicts superfluous, because the current TO overlaps some web
1492 already being in conflict with FROM. In this case the smaller webs are
1493 deleted from the conflict list. Likewise if TO is overlapped by a web
1494 already in the list, it isn't added at all. Note, that this can only
1495 happen, if SUBREG webs are involved. */
1498 add_conflict_edge (struct web *from, struct web *to)
/* Conflicts are recorded only on non-precolored FROM webs; the hardreg
   side of an edge carries no list. */
1500 if (from->type != PRECOLORED)
1502 struct web *pfrom = find_web_for_subweb (from);
1503 struct web *pto = find_web_for_subweb (to);
1504 struct sub_conflict *sl;
1505 struct conflict_link *cl = pfrom->conflict_list;
1508 /* This can happen when subwebs of one web conflict with each
1509 other. In live_out_1() we created such conflicts between yet
1510 undefined webparts and defs of parts which didn't overlap with the
1511 undefined bits. Then later they nevertheless could have merged into
1512 one web, and then we land here. */
/* Preserve the pre-existing conflict list once, before mutating it. */
1515 if (remember_conflicts && !pfrom->have_orig_conflicts)
1516 copy_conflict_list (pfrom);
/* No superweb-level edge yet: create the conflict_link and note the
   edge in the superweb interference graph bitmap. */
1517 if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
1519 cl = ra_alloc (sizeof (*cl));
1522 cl->next = pfrom->conflict_list;
1523 pfrom->conflict_list = cl;
1524 if (pto->type != SELECT && pto->type != COALESCED)
1525 pfrom->num_conflicts += 1 + pto->add_hardregs;
1526 SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
1530 /* We don't need to test for cl==NULL, because at this point
1531 a cl with cl->t==pto is guaranteed to exist. */
1532 while (cl->t != pto)
1534 if (pfrom != from || pto != to)
1536 /* This is a subconflict which should be added.
1537 If we inserted cl in this invocation, we really need to add this
1538 subconflict. If we did _not_ add it here, we only add the
1539 subconflict, if cl already had subconflicts, because otherwise
1540 this indicated, that the whole webs already conflict, which
1541 means we are not interested in this subconflict. */
1542 if (!may_delete || cl->sub != NULL)
1544 sl = ra_alloc (sizeof (*sl));
1552 /* pfrom == from && pto == to means, that we are not interested
1553 anymore in the subconflict list for this pair, because anyway
1554 the whole webs conflict. */
1559 /* Record a conflict between two webs, if we haven't recorded it
1563 record_conflict (struct web *web1, struct web *web2)
1565 unsigned int id1 = web1->id, id2 = web2->id;
1566 unsigned int index = igraph_index (id1, id2);
1567 /* Trivial non-conflict or already recorded conflict. */
1568 if (web1 == web2 || TEST_BIT (igraph, index))
1570 gcc_assert (id1 != id2);
1571 /* As fixed_regs are no targets for allocation, conflicts with them
1573 if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
1574 || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
1576 /* Conflicts with hardregs, which are not even a candidate
1577 for this pseudo are also pointless. */
1578 if ((web1->type == PRECOLORED
1579 && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
1580 || (web2->type == PRECOLORED
1581 && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
1583 /* Similar if the set of possible hardregs don't intersect. This iteration
1584 those conflicts are useless (and would make num_conflicts wrong, because
1585 num_freedom is calculated from the set of possible hardregs).
1586 But in presence of spilling and incremental building of the graph we
1587 need to note all uses of webs conflicting with the spilled ones.
1588 Because the set of possible hardregs can change in the next round for
1589 spilled webs, we possibly have then conflicts with webs which would
1590 be excluded now (because then hardregs intersect). But we actually
1591 need to check those uses, and to get hold of them, we need to remember
1592 also webs conflicting with this one, although not conflicting in this
1593 round because of non-intersecting hardregs. */
1594 if (web1->type != PRECOLORED && web2->type != PRECOLORED
1595 && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
1597 struct web *p1 = find_web_for_subweb (web1);
1598 struct web *p2 = find_web_for_subweb (web2);
1599 /* We expect these to be rare enough to justify bitmaps. And because
1600 we have only a special use for it, we note only the superwebs. */
1601 bitmap_set_bit (p1->useless_conflicts, p2->id);
1602 bitmap_set_bit (p2->useless_conflicts, p1->id);
/* A real conflict: mark the symmetric pair in the triangular igraph
   bitmap and add directed edges in both webs' conflict lists. */
1605 SET_BIT (igraph, index);
1606 add_conflict_edge (web1, web2);
1607 add_conflict_edge (web2, web1);
1610 /* For each web W this produces the missing subwebs Wx, such that it's
1611 possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
1614 build_inverse_webs (struct web *web)
1616 struct web *sweb = web->subreg_next;
1617 unsigned HOST_WIDE_INT undef;
/* Bitmask covering the whole web; each existing subweb's bits are
   removed to yield the complement that may need its own subweb. */
1619 undef = rtx_to_undefined (web->orig_x);
1620 for (; sweb; sweb = sweb->subreg_next)
1621 /* Only create inverses of non-artificial webs. */
1622 if (!sweb->artificial)
1624 unsigned HOST_WIDE_INT bits;
1625 bits = undef & ~ rtx_to_undefined (sweb->orig_x);
/* (A `while (bits)' loop around the following is not visible here.) */
1628 unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
1629 if (!find_subweb_2 (web, size_word))
1630 add_subweb_2 (web, size_word);
1635 /* Copies the content of WEB to a new one, and link it into WL.
1636 Used for consistency checking. */
1639 copy_web (struct web *web, struct web_link **wl)
/* NOTE(review): the struct copy and list linking that follow these
   allocations are not visible in this view. */
1641 struct web *cweb = xmalloc (sizeof *cweb);
1642 struct web_link *link = ra_alloc (sizeof *link);
1649 /* Given a list of webs LINK, compare the content of the webs therein
1650 with the global webs of the same ID. For consistency checking. */
1653 compare_and_free_webs (struct web_link **link)
1655 struct web_link *wl;
1656 for (wl = *link; wl; wl = wl->next)
1658 struct web *web1 = wl->web;
/* web1 is the saved snapshot, web2 the freshly rebuilt web with the
   same ID — all compared members must agree. */
1659 struct web *web2 = ID2WEB (web1->id);
1660 gcc_assert (web1->regno == web2->regno);
1661 gcc_assert (web1->mode_changed == web2->mode_changed);
1662 gcc_assert (rtx_equal_p (web1->orig_x, web2->orig_x));
1663 gcc_assert (web1->type == web2->type);
1664 if (web1->type != PRECOLORED)
1668 /* Only compare num_defs/num_uses with non-hardreg webs.
1669 E.g. the number of uses of the framepointer changes due to
1670 inserting spill code. */
1671 gcc_assert (web1->num_uses == web2->num_uses);
1672 gcc_assert (web1->num_defs == web2->num_defs);
1673 /* Similarly, if the framepointer was unreferenced originally
1674 but we added spills, these fields may not match. */
1675 gcc_assert (web1->crosses_call == web2->crosses_call);
1676 gcc_assert (web1->live_over_abnormal == web2->live_over_abnormal);
1677 for (i = 0; i < web1->num_defs; i++)
1678 gcc_assert (web1->defs[i] == web2->defs[i]);
1679 for (i = 0; i < web1->num_uses; i++)
1680 gcc_assert (web1->uses[i] == web2->uses[i]);
/* (The freeing of the snapshot webs/list tail is not visible here.) */
1682 if (web1->type == PRECOLORED)
1694 /* Setup and fill uses[] and defs[] arrays of the webs. */
1697 init_webs_defs_uses (void)
1700 for (d = WEBS(INITIAL); d; d = d->next)
1702 struct web *web = DLIST_WEB (d);
1703 unsigned int def_i, use_i;
1704 struct df_link *link;
/* Precolored webs keep no reference arrays; the `continue' this guard
   controls is among the lines not visible here. */
1707 if (web->type == PRECOLORED)
1709 web->num_defs = web->num_uses = 0;
1713 web->defs = xmalloc (web->num_defs * sizeof (web->defs[0]));
1715 web->uses = xmalloc (web->num_uses * sizeof (web->uses[0]));
/* Distribute the refs collected in temp_refs into defs[]/uses[],
   then verify the counts matched exactly. */
1717 for (link = web->temp_refs; link; link = link->next)
1719 if (DF_REF_REG_DEF_P (link->ref))
1720 web->defs[def_i++] = link->ref;
1722 web->uses[use_i++] = link->ref;
1724 web->temp_refs = NULL;
1725 gcc_assert (def_i == web->num_defs);
1726 gcc_assert (use_i == web->num_uses);
1730 /* Called by parts_to_webs(). This creates (or recreates) the webs (and
1731 subwebs) from web parts, gives them IDs (only to super webs), and sets
1732 up use2web and def2web arrays. */
1735 parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
1736 struct df_link *all_refs)
1739 unsigned int webnum;
1740 unsigned int def_id = df->def_id;
1741 unsigned int use_id = df->use_id;
/* First use entry; web_parts below this pointer are defs. */
1742 struct web_part *wp_first_use = &web_parts[def_id];
1744 /* For each root web part: create and initialize a new web,
1745 setup def2web[] and use2web[] for all defs and uses, and
1746 id2web for all new webs. */
1749 for (i = 0; i < def_id + use_id; i++)
1751 struct web *subweb, *web = 0; /* Initialize web to silence warnings. */
1752 struct web_part *wp = &web_parts[i];
1753 struct ref *ref = wp->ref;
1754 unsigned int ref_id;
1761 all_refs[i].ref = ref;
1762 reg = DF_REF_REG (ref);
1765 /* If we have a web part root, create a new web. */
1766 unsigned int newid = ~(unsigned)0;
1767 unsigned int old_web = 0;
1769 /* In the first pass, there are no old webs, so unconditionally
1770 allocate a new one. */
1773 web = xmalloc (sizeof (struct web));
1774 newid = last_num_webs++;
1775 init_one_web (web, GET_CODE (reg) == SUBREG
1776 ? SUBREG_REG (reg) : reg);
1778 /* Otherwise, we look for an old web. */
1781 /* Remember, that use2web == def2web + def_id.
1782 Ergo is def2web[i] == use2web[i - def_id] for i >= def_id.
1783 So we only need to look into def2web[] array.
1784 Try to look at the web, which formerly belonged to this
1787 /* Or which belonged to this hardreg. */
1788 if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER)
1789 web = hardreg2web[DF_REF_REGNO (ref)];
1792 /* If we found one, reuse it. */
1793 web = find_web_for_subweb (web);
1794 remove_list (web->dlink, &WEBS(INITIAL));
1796 copy_web (web, copy_webs);
1800 /* Otherwise use a new one. First from the free list. */
1802 web = DLIST_WEB (pop_list (&WEBS(FREE)));
1805 /* Else allocate a new one. */
1806 web = xmalloc (sizeof (struct web));
1807 newid = last_num_webs++;
1810 /* The id is zeroed in init_one_web(). */
/* newid still ~0 means we reused an old web: reinit (keeping some
   members); otherwise it is a brand new web. */
1811 if (newid == ~(unsigned)0)
1814 reinit_one_web (web, GET_CODE (reg) == SUBREG
1815 ? SUBREG_REG (reg) : reg);
1817 init_one_web (web, GET_CODE (reg) == SUBREG
1818 ? SUBREG_REG (reg) : reg);
1819 web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0;
/* Carry the aggregated web-part statistics over into the web. */
1821 web->span_deaths = wp->spanned_deaths;
1822 web->crosses_call = wp->crosses_call;
1824 web->temp_refs = NULL;
1826 if (web->regno < FIRST_PSEUDO_REGISTER)
/* Exactly one web per hard register. */
1828 if (!hardreg2web[web->regno])
1829 hardreg2web[web->regno] = web;
1831 gcc_assert (hardreg2web[web->regno] == web);
1835 /* If this reference already had a web assigned, we are done.
1836 This test better is equivalent to the web being an old web.
1837 Otherwise something is screwed. (This is tested) */
1838 if (def2web[i] != NULL)
1841 web = find_web_for_subweb (web);
1842 /* But if this ref includes a mode change, or was a use live
1843 over an abnormal call, set appropriate flags in the web. */
1844 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1845 && web->regno >= FIRST_PSEUDO_REGISTER)
1846 web->mode_changed = 1;
1847 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1848 && web->regno >= FIRST_PSEUDO_REGISTER)
1849 web->subreg_stripped = 1;
1851 && TEST_BIT (live_over_abnormal, ref_id))
1852 web->live_over_abnormal = 1;
1853 /* And check, that it's not a newly allocated web. This would be
1854 an inconsistency. */
1855 gcc_assert (web->old_web);
1856 gcc_assert (web->type != PRECOLORED);
1859 /* In case this was no web part root, we need to initialize WEB
1860 from the ref2web array belonging to the root. */
1863 struct web_part *rwp = find_web_part (wp);
1864 unsigned int j = DF_REF_ID (rwp->ref);
1865 if (rwp < wp_first_use)
1869 web = find_web_for_subweb (web);
1872 /* Remember all references for a web in a single linked list. */
1873 all_refs[i].next = web->temp_refs;
1874 web->temp_refs = &all_refs[i];
1876 /* And the test, that if def2web[i] was NULL above, that we are _not_
1878 gcc_assert (!web->old_web || web->type == PRECOLORED);
1880 /* Possible create a subweb, if this ref was a subreg. */
1881 if (GET_CODE (reg) == SUBREG)
1883 subweb = find_subweb (web, reg);
1886 subweb = add_subweb (web, reg);
/* Old webs must already have had all their subwebs. */
1887 gcc_assert (!web->old_web);
1893 /* And look, if the ref involves an invalid mode change. */
1894 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1895 && web->regno >= FIRST_PSEUDO_REGISTER)
1896 web->mode_changed = 1;
1897 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1898 && web->regno >= FIRST_PSEUDO_REGISTER)
1899 web->subreg_stripped = 1;
1901 /* Setup def2web, or use2web, and increment num_defs or num_uses. */
1904 /* Some sanity checks. */
1907 struct web *compare = def2web[i];
1908 if (i < last_def_id)
1909 gcc_assert (!web->old_web || compare == subweb);
1910 gcc_assert (web->old_web || !compare);
1911 gcc_assert (!compare || compare == subweb);
1913 def2web[i] = subweb;
1920 struct web *compare = use2web[ref_id];
1922 gcc_assert (ref_id >= last_use_id
1923 || !web->old_web || compare == subweb);
1924 gcc_assert (web->old_web || !compare);
1925 gcc_assert (!compare || compare == subweb);
1927 use2web[ref_id] = subweb;
1929 if (TEST_BIT (live_over_abnormal, ref_id))
1930 web->live_over_abnormal = 1;
1934 /* We better now have exactly as many webs as we had web part roots. */
1935 gcc_assert (webnum == num_webs);
1940 /* This builds full webs out of web parts, without relating them to each
1941 other (i.e. without creating the conflict edges). */
1944 parts_to_webs (struct df *df)
1947 unsigned int webnum;
1948 struct web_link *copy_webs = NULL;
1950 struct df_link *all_refs;
1953 /* First build webs and ordinary subwebs. */
1954 all_refs = xcalloc (df->def_id + df->use_id, sizeof (all_refs[0]));
1955 webnum = parts_to_webs_1 (df, &copy_webs, all_refs);
1957 /* Setup the webs for hardregs which are still missing (weren't
1958 mentioned in the code). */
1959 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1960 if (!hardreg2web[i])
1962 struct web *web = xmalloc (sizeof (struct web));
1963 init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
1964 web->id = last_num_webs++;
1965 hardreg2web[web->regno] = web;
1967 num_webs = last_num_webs;
1969 /* Now create all artificial subwebs, i.e. those, which do
1970 not correspond to a real subreg in the current function's RTL, but
1971 which nevertheless is a target of a conflict.
1972 XXX we need to merge this loop with the one above, which means, we need
1973 a way to later override the artificiality. Beware: currently
1974 add_subweb_2() relies on the existence of normal subwebs for deducing
1975 a sane mode to use for the artificial subwebs. */
1976 for (i = 0; i < df->def_id + df->use_id; i++)
1978 struct web_part *wp = &web_parts[i];
1979 struct tagged_conflict *cl;
/* Only web-part roots with a live ref can carry sub-conflicts. */
1981 if (wp->uplink || !wp->ref)
1983 gcc_assert (!wp->sub_conflicts);
1987 web = find_web_for_subweb (web);
1988 for (cl = wp->sub_conflicts; cl; cl = cl->next)
1989 if (!find_subweb_2 (web, cl->size_word))
1990 add_subweb_2 (web, cl->size_word);
1993 /* And now create artificial subwebs needed for representing the inverse
1994 of some subwebs. This also gives IDs to all subwebs. */
1995 webnum = last_num_webs;
1996 for (d = WEBS(INITIAL); d; d = d->next)
1998 struct web *web = DLIST_WEB (d);
1999 if (web->subreg_next)
2002 build_inverse_webs (web);
2003 for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
2004 sweb->id = webnum++;
2008 /* Now that everyone has an ID, we can setup the id2web array. */
2009 id2web = xcalloc (webnum, sizeof (id2web[0]));
2010 for (d = WEBS(INITIAL); d; d = d->next)
2012 struct web *web = DLIST_WEB (d);
2013 ID2WEB (web->id) = web;
2014 for (web = web->subreg_next; web; web = web->subreg_next)
2015 ID2WEB (web->id) = web;
2017 num_subwebs = webnum - last_num_webs;
2018 num_allwebs = num_webs + num_subwebs;
2019 num_webs += num_subwebs;
2021 /* Allocate and clear the conflict graph bitmaps. */
/* igraph is triangular (unordered pairs via igraph_index), hence the
   num_webs*num_webs/2 size; sup_igraph is a full square matrix. */
2022 igraph = sbitmap_alloc (num_webs * num_webs / 2);
2023 sup_igraph = sbitmap_alloc (num_webs * num_webs);
2024 sbitmap_zero (igraph);
2025 sbitmap_zero (sup_igraph);
2027 /* Distribute the references to their webs. */
2028 init_webs_defs_uses ();
2029 /* And do some sanity checks if old webs, and those recreated from the
2030 really are the same. */
2031 compare_and_free_webs (&copy_webs);
2035 /* This deletes all conflicts to and from webs which need to be renewed
2036 in this pass of the allocator, i.e. those which were spilled in the
2037 last pass. Furthermore it also rebuilds the bitmaps for the remaining
2041 reset_conflicts (void)
2044 bitmap newwebs = BITMAP_XMALLOC ();
/* Pass 1: collect the IDs of webs whose conflicts must be rebuilt. */
2045 for (i = 0; i < num_webs - num_subwebs; i++)
2047 struct web *web = ID2WEB (i);
2048 /* Hardreg webs and non-old webs are new webs (which
2049 need rebuilding). */
2050 if (web->type == PRECOLORED || !web->old_web)
2051 bitmap_set_bit (newwebs, web->id);
/* Pass 2: restore each web's conflict list and re-seed the igraph
   bitmaps from the conflicts that are retained. */
2054 for (i = 0; i < num_webs - num_subwebs; i++)
2056 struct web *web = ID2WEB (i);
2057 struct conflict_link *cl;
2058 struct conflict_link **pcl;
2059 pcl = &(web->conflict_list);
2061 /* First restore the conflict list to be like it was before
2063 if (web->have_orig_conflicts)
2065 web->conflict_list = web->orig_conflict_list;
2066 web->orig_conflict_list = NULL;
2068 gcc_assert (!web->orig_conflict_list);
2070 /* New non-precolored webs, have no conflict list. */
2071 if (web->type != PRECOLORED && !web->old_web)
2074 /* Useless conflicts will be rebuilt completely. But check
2075 for cleanliness, as the web might have come from the
2077 gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0);
2081 /* Useless conflicts with new webs will be rebuilt if they
2083 bitmap_operation (web->useless_conflicts, web->useless_conflicts,
2084 newwebs, BITMAP_AND_COMPL);
2085 /* Go through all conflicts, and retain those to old webs. */
2086 for (cl = web->conflict_list; cl; cl = cl->next)
2088 if (cl->t->old_web || cl->t->type == PRECOLORED)
2093 /* Also restore the entries in the igraph bitmaps. */
2094 web->num_conflicts += 1 + cl->t->add_hardregs;
2095 SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
2096 /* No subconflicts mean full webs conflict. */
2098 SET_BIT (igraph, igraph_index (web->id, cl->t->id));
2100 /* Else only the parts in cl->sub must be in the
2103 struct sub_conflict *sl;
2104 for (sl = cl->sub; sl; sl = sl->next)
2105 SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2111 web->have_orig_conflicts = 0;
2113 BITMAP_XFREE (newwebs);
2116 /* For each web check it's num_conflicts member against that
2117 number, as calculated from scratch from all neighbors. */
2121 check_conflict_numbers (void)
2124 for (i = 0; i < num_webs; i++)
2126 struct web *web = ID2WEB (i);
/* NOTE(review): `0 *' means new_conf always starts at 0; the same idiom
   appears at the other num_conflicts initializations — apparently a
   deliberate (if odd) convention in this file. */
2127 int new_conf = 0 * web->add_hardregs;
2128 struct conflict_link *cl;
/* Recount: one slot per neighbor plus its extra hardregs, ignoring
   neighbors already selected or coalesced away. */
2129 for (cl = web->conflict_list; cl; cl = cl->next)
2130 if (cl->t->type != SELECT && cl->t->type != COALESCED)
2131 new_conf += 1 + cl->t->add_hardregs;
2132 gcc_assert (web->type == PRECOLORED || new_conf == web->num_conflicts);
2137 /* Convert the conflicts between web parts to conflicts between full webs.
2139 This can't be done in parts_to_webs(), because for recording conflicts
2140 between webs we need to know their final usable_regs set, which is used
2141 to discard non-conflicts (between webs having no hard reg in common).
2142 But this is set for spill temporaries only after the webs itself are
2143 built. Until then the usable_regs set is based on the pseudo regno used
2144 in this web, which may contain far less registers than later determined.
2145 This would result in us losing conflicts (due to record_conflict()
2146 thinking that a web can only be allocated to the current usable_regs,
2147 whereas later this is extended) leading to colorings, where some regs which
2148 in reality conflict get the same color. */
/* NOTE(review): several original lines are missing from this listing
   (declarations of i, j, d, bi and various braces).  Tokens unchanged.  */
2151 conflicts_between_webs (struct df *df)
2155 bitmap ignore_defs = BITMAP_XMALLOC ();
2156 unsigned int have_ignored;
/* pass_cache[web-id] records the pass in which that web was last seen,
   to avoid redundant record_conflict() calls (see comment below).  */
2157 unsigned int *pass_cache = xcalloc (num_webs, sizeof (int));
2158 unsigned int pass = 0;
2163 /* It is possible, that in the conflict bitmaps still some defs I are noted,
2164 which have web_parts[I].ref being NULL. This can happen, when from the
2165 last iteration the conflict bitmap for this part wasn't deleted, but a
2166 conflicting move insn was removed. Its DEF is still in the conflict
2167 bitmap, but it doesn't exist anymore in df->defs. To not have to check
2168 it in the tight loop below, we instead remember the ID's of them in a
2169 bitmap, and loop only over IDs which are not in it. */
2170 for (i = 0; i < df->def_id; i++)
2171 if (web_parts[i].ref == NULL)
2172 bitmap_set_bit (ignore_defs, i);
2173 have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0)
2175 /* Now record all conflicts between webs. Note that we only check
2176 the conflict bitmaps of all defs. Conflict bitmaps are only in
2177 webpart roots. If they are in uses, those uses are roots, which
2178 means, that this is an uninitialized web, whose conflicts
2179 don't matter. Nevertheless for hardregs we also need to check uses.
2180 E.g. hardregs used for argument passing have no DEF in the RTL,
2181 but if they have uses, they indeed conflict with all DEFs they
/* The index space is defs first (0..def_id-1), then uses.  */
2183 for (i = 0; i < df->def_id + df->use_id; i++)
2185 struct tagged_conflict *cl = web_parts[i].sub_conflicts;
2186 struct web *supweb1;
2189 && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
2191 supweb1 = def2web[i];
2192 supweb1 = find_web_for_subweb (supweb1);
/* Walk each (size,word)-tagged conflict bitmap of this web part.  */
2193 for (; cl; cl = cl->next)
2197 struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
/* Strip out the stale def IDs collected above.  */
2201 bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
2203 /* We reduce the number of calls to record_conflict() with this
2204 pass thing. record_conflict() itself also has some early-out
2205 optimizations, but here we can use the special properties of
2206 the loop (constant web1) to reduce that even more.
2207 We once used an sbitmap of already handled web indices,
2208 but sbitmaps are slow to clear and bitmaps are slow to
2209 set/test. The current approach needs more memory, but
2210 locality is large. */
2213 /* Note, that there are only defs in the conflicts bitset. */
2214 EXECUTE_IF_SET_IN_BITMAP (cl->conflicts, 0, j, bi)
2216 struct web *web2 = def2web[j];
2217 unsigned int id2 = web2->id;
/* Only record each (web1, web2) pair once per pass.  */
2218 if (pass_cache[id2] != pass)
2220 pass_cache[id2] = pass;
2221 record_conflict (web1, web2);
2228 BITMAP_XFREE (ignore_defs);
/* Add conflicts implied by calls and abnormal edges, which are not
   visible as def/def conflicts in the web parts.  */
2230 for (d = WEBS(INITIAL); d; d = d->next)
2232 struct web *web = DLIST_WEB (d);
2235 if (web->crosses_call)
2236 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2237 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, j))
2238 record_conflict (web, hardreg2web[j]);
2241 /* Pseudos can't go in stack regs if they are live at the beginning of
2242 a block that is reached by an abnormal edge. */
2243 if (web->live_over_abnormal)
2244 for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
2245 record_conflict (web, hardreg2web[j]);
2327 /* Remember that a web was spilled, and change some characteristics
/* (continuation of the above comment is among the missing lines;
   presumably "...accordingly" — TODO confirm against full source.)  */
2254 remember_web_was_spilled (struct web *web)
2257 unsigned int found_size = 0;
/* Mark the web as a spill temporary for this pass.  */
2259 web->spill_temp = 1;
2261 /* From now on don't use reg_pref/alt_class (regno) anymore for
2262 this web, but instead usable_regs. We can't use spill_temp for
2263 this, as it might get reset later, when we are coalesced to a
2264 non-spill-temp. In that case we still want to use usable_regs. */
2265 web->use_my_regs = 1;
2267 /* We don't constrain spill temporaries in any way for now.
2268 It's wrong sometimes to have the same constraints or
2269 preferences as the original pseudo, esp. if they were very narrow.
2270 (E.g. there once was a reg wanting class AREG (only one register)
2271 without alternative class. As long, as also the spill-temps for
2272 this pseudo had the same constraints it was spilled over and over.
2273 Ideally we want some constraints also on spill-temps: Because they are
2274 not only loaded/stored, but also worked with, any constraints from insn
2275 alternatives needs applying. Currently this is dealt with by reload, as
2276 many other things, but at some time we want to integrate that
2277 functionality into the allocator. */
/* Webs beyond max_normal_pseudo are the allocator's own stack pseudos;
   they keep their preferred/alternate class union.  */
2278 if (web->regno >= max_normal_pseudo)
2280 COPY_HARD_REG_SET (web->usable_regs,
2281 reg_class_contents[reg_preferred_class (web->regno)]);
2282 IOR_HARD_REG_SET (web->usable_regs,
2283 reg_class_contents[reg_alternate_class (web->regno)]);
/* Otherwise (else-branch; some lines are missing from this listing)
   allow all of GENERAL_REGS.  */
2286 COPY_HARD_REG_SET (web->usable_regs,
2287 reg_class_contents[(int) GENERAL_REGS]);
2288 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
2289 prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
2290 #ifdef CANNOT_CHANGE_MODE_CLASS
2291 if (web->mode_changed)
2292 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
2294 web->num_freedom = hard_regs_count (web->usable_regs);
2295 gcc_assert (web->num_freedom);
2296 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
2297 /* Now look for a class, which is subset of our constraints, to
2298 setup add_hardregs, and regclass for debug output. */
2299 web->regclass = NO_REGS;
/* Scan classes from large to small, keeping the biggest subset class.  */
2300 for (i = (int) ALL_REGS - 1; i > 0; i--)
2304 COPY_HARD_REG_SET (test, reg_class_contents[i]);
2305 AND_COMPL_HARD_REG_SET (test, never_use_colors);
2306 GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
2309 /* Measure the actual number of bits which really are overlapping
2310 the target regset, not just the reg_class_size. */
2311 size = hard_regs_count (test);
2312 if (found_size < size)
2314 web->regclass = (enum reg_class) i;
/* The `0 *' factors below are no-ops; adjust starts at (and stays) zero
   net of the two cancelling terms.  */
2319 adjust = 0 * web->add_hardregs;
2321 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
2322 web->num_freedom -= web->add_hardregs;
2323 gcc_assert (web->num_freedom);
2324 adjust -= 0 * web->add_hardregs;
2325 web->num_conflicts -= adjust;
2328 /* Look at each web, if it is used as spill web. Or better said,
2329 if it will be spillable in this pass. */
2332 detect_spill_temps (void)
/* `already' tracks insn UIDs whose death-count was compensated once.  */
2335 bitmap already = BITMAP_XMALLOC ();
2337 /* Detect webs used for spill temporaries. */
2338 for (d = WEBS(INITIAL); d; d = d->next)
2340 struct web *web = DLIST_WEB (d);
2342 /* Below only the detection of spill temporaries. We never spill
2343 precolored webs, so those can't be spill temporaries. The code above
2344 (remember_web_was_spilled) can't currently cope with hardregs
2346 if (web->regno < FIRST_PSEUDO_REGISTER)
2348 /* Uninitialized webs can't be spill-temporaries. */
2349 if (web->num_defs == 0)
2352 /* A web with only defs and no uses can't be spilled. Nevertheless
2353 it must get a color, as it takes away a register from all webs
2354 live at these defs. So we make it a short web. */
2355 if (web->num_uses == 0)
2356 web->spill_temp = 3;
2357 /* A web which was spilled last time, but for which no insns were
2358 emitted (can happen with IR spilling ignoring sometimes
2360 else if (web->changed)
2361 web->spill_temp = 1;
2362 /* A spill temporary has one def, one or more uses, all uses
2363 are in one insn, and either the def or use insn was inserted
2364 by the allocator. */
2365 /* XXX not correct currently. There might also be spill temps
2366 involving more than one def. Usually that's an additional
2367 clobber in the using instruction. We might also constrain
2368 ourself to that, instead of like currently marking all
2369 webs involving any spill insns at all. */
/* A ref with UID >= orig_max_uid lives in an insn the allocator
   itself emitted, i.e. spill code from a previous round.  */
2373 int spill_involved = 0;
2374 for (i = 0; i < web->num_uses && !spill_involved; i++)
2375 if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
2377 for (i = 0; i < web->num_defs && !spill_involved; i++)
2378 if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
2381 if (spill_involved/* && ra_pass > 2*/)
2383 int num_deaths = web->span_deaths;
2384 /* Mark webs involving at least one spill insn as
2386 remember_web_was_spilled (web);
2387 /* Search for insns which define and use the web in question
2388 at the same time, i.e. look for rmw insns. If these insns
2389 are also deaths of other webs they might have been counted
2390 as such into web->span_deaths. But because of the rmw nature
2391 of this insn it is no point where a load/reload could be
2392 placed successfully (it would still conflict with the
2393 dead web), so reduce the number of spanned deaths by those
2394 insns. Note that sometimes such deaths are _not_ counted,
2395 so negative values can result. */
2396 bitmap_zero (already);
2397 for (i = 0; i < web->num_defs; i++)
2399 rtx insn = web->defs[i]->insn;
2400 if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
2401 && !bitmap_bit_p (already, INSN_UID (insn)))
2404 bitmap_set_bit (already, INSN_UID (insn));
2405 /* Only decrement it once for each insn. */
2406 for (j = 0; j < web->num_uses; j++)
2407 if (web->uses[j]->insn == insn)
2414 /* But mark them specially if they could possibly be spilled,
2415 either because they cross some deaths (without the above
2416 mentioned ones) or calls. */
2417 if (web->crosses_call || num_deaths > 0)
2418 web->spill_temp = 1 * 2;
2420 /* A web spanning no deaths can't be spilled either. No loads
2421 would be created for it, ergo no defs. So the insns wouldn't
2422 change making the graph not easier to color. Make this also
2423 a short web. Don't do this if it crosses calls, as these are
2424 also points of reloads. */
2425 else if (web->span_deaths == 0 && !web->crosses_call)
2426 web->spill_temp = 3;
/* Remember the decision across coalescing, which may reset spill_temp.  */
2428 web->orig_spill_temp = web->spill_temp;
2430 BITMAP_XFREE (already);
2433 /* Returns nonzero if the rtx MEM refers somehow to a stack location. */
2436 memref_is_stack_slot (rtx mem)
2438 rtx ad = XEXP (mem, 0);
/* Only (plus BASE (const_int OFF)) addresses are considered.  */
2440 if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT)
/* NOTE(review): the assignment of `x' (presumably x = XEXP (ad, 0)) is
   among the lines missing from this listing.  */
2443 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2444 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
2445 || x == stack_pointer_rtx)
2450 /* Returns nonzero, if rtx X somewhere contains any pseudo register. */
2453 contains_pseudo (rtx x)
/* Look through SUBREGs to the underlying reg.  */
2457 if (GET_CODE (x) == SUBREG)
2461 if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
/* Recurse over the rtx format string: 'e' operands and 'E' vectors.  */
2467 fmt = GET_RTX_FORMAT (GET_CODE (x));
2468 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2471 if (contains_pseudo (XEXP (x, i)))
2474 else if (fmt[i] == 'E')
2477 for (j = 0; j < XVECLEN (x, i); j++)
2478 if (contains_pseudo (XVECEXP (x, i, j)))
2484 /* Returns nonzero, if we are able to rematerialize something with
2485 value X. If it's not a general operand, we test if we can produce
2486 a valid insn which set a pseudo to that value, and that insn doesn't
2487 clobber anything. */
/* Scratch insn reused across calls; GTY-marked so the garbage
   collector keeps it alive.  */
2489 static GTY(()) rtx remat_test_insn;
2491 want_to_remat (rtx x)
2493 int num_clobbers = 0;
2496 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
2497 if (general_operand (x, GET_MODE (x)))
2500 /* Otherwise, check if we can make a valid insn from it. First initialize
2501 our test insn if we haven't already. */
2502 if (remat_test_insn == 0)
2505 = make_insn_raw (gen_rtx_SET (VOIDmode,
2506 gen_rtx_REG (word_mode,
2507 FIRST_PSEUDO_REGISTER * 2),
2509 NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
2512 /* Now make an insn like the one we would make when rematerializing
2513 the value X and see if valid. */
2514 PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
2515 SET_SRC (PATTERN (remat_test_insn)) = x;
2516 /* XXX For now we don't allow any clobbers to be added, not just no
2517 hardreg clobbers. */
/* recog() returns the insn code, or < 0 if no pattern matches.  */
2518 return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
2519 &num_clobbers)) >= 0
2520 && (num_clobbers == 0
2521 /*|| ! added_clobbers_hard_reg_p (icode)*/));
2524 /* Look at all webs, if they perhaps are rematerializable.
2525 They are, if all their defs are simple sets to the same value,
2526 and that value is simple enough, and want_to_remat() holds for it. */
2529 detect_remat_webs (void)
2532 for (d = WEBS(INITIAL); d; d = d->next)
2534 struct web *web = DLIST_WEB (d);
2537 /* Hardregs and useless webs aren't spilled -> no remat necessary.
2538 Defless webs obviously also can't be rematerialized. */
2539 if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
/* Check every def; all must be single sets of the same source.  */
2542 for (i = 0; i < web->num_defs; i++)
2545 rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
2549 src = SET_SRC (set);
2550 /* When only subregs of the web are set it isn't easily
2551 rematerializable. */
2552 if (!rtx_equal_p (SET_DEST (set), web->orig_x))
2554 /* If we already have a pattern it must be equal to the current. */
2555 if (pat && !rtx_equal_p (pat, src))
2557 /* Don't do the expensive checks multiple times. */
2560 /* For now we allow only constant sources. */
2561 if ((CONSTANT_P (src)
2562 /* If the whole thing is stable already, it is a source for
2563 remat, no matter how complicated (probably all needed
2564 resources for it are live everywhere, and don't take
2565 additional register resources). */
2566 /* XXX Currently we can't use patterns which contain
2567 pseudos, _even_ if they are stable. The code simply isn't
2568 prepared for that. All those operands can't be spilled (or
2569 the dependent remat webs are not remat anymore), so they
2570 would be oldwebs in the next iteration. But currently
2571 oldwebs can't have their references changed. The
2572 incremental machinery barfs on that. */
2573 || (!rtx_unstable_p (src) && !contains_pseudo (src))
2574 /* Additionally also memrefs to stack-slots are useful, when
2575 we created them ourself. They might not have set their
2576 unchanging flag set, but nevertheless they are stable across
2577 the livetime in question. */
2579 && INSN_UID (insn) >= orig_max_uid
2580 && memref_is_stack_slot (src)))
2581 /* And we must be able to construct an insn without
2582 side-effects to actually load that value into a reg. */
2583 && want_to_remat (src))
/* If the loop ran to completion every def matched: remember `pat'
   as this web's remat pattern (assignment is among missing lines).  */
2588 if (pat && i == web->num_defs)
2593 /* Determine the spill costs of all webs. */
2596 determine_web_costs (void)
2599 for (d = WEBS(INITIAL); d; d = d->next)
2601 unsigned int i, num_loads;
2602 int load_cost, store_cost;
/* `w' accumulates frequency-weighted reference counts.  */
2603 unsigned HOST_WIDE_INT w;
2604 struct web *web = DLIST_WEB (d);
/* Precolored webs are never spilled; no cost needed.  */
2605 if (web->type == PRECOLORED)
2607 /* Get costs for one load/store. Note that we offset them by 1,
2608 because some patterns have a zero rtx_cost(), but we of course
2609 still need the actual load/store insns. With zero all those
2610 webs would be the same, no matter how often and where
2614 /* This web is rematerializable. Beware, we set store_cost to
2615 zero optimistically assuming, that we indeed don't emit any
2616 stores in the spill-code addition. This might be wrong if
2617 at the point of the load not all needed resources are
2618 available, in which case we emit a stack-based load, for
2619 which we in turn need the according stores. */
2620 load_cost = 1 + rtx_cost (web->pattern, 0);
/* Non-remat webs (else-branch; braces elided in this listing) pay
   memory-move cost both ways.  */
2625 load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2627 store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2630 /* We create only loads at deaths, whose number is in span_deaths. */
2631 num_loads = MIN (web->span_deaths, web->num_uses);
2632 for (w = 0, i = 0; i < web->num_uses; i++)
2633 w += DF_REF_BB (web->uses[i])->frequency + 1;
/* Scale the weighted use count down to the number of loads actually
   emitted, rounding up.  */
2634 if (num_loads < web->num_uses)
2635 w = (w * num_loads + web->num_uses - 1) / web->num_uses;
2636 web->spill_cost = w * load_cost;
2639 for (w = 0, i = 0; i < web->num_defs; i++)
2640 w += DF_REF_BB (web->defs[i])->frequency + 1;
2641 web->spill_cost += w * store_cost;
2643 web->orig_spill_cost = web->spill_cost;
2647 /* Detect webs which are set in a conditional jump insn (possibly a
2648 decrement-and-branch type of insn), and mark them not to be
2649 spillable. The stores for them would need to be placed on edges,
2650 which destroys the CFG. (Somewhen we want to deal with that XXX) */
2653 detect_webs_set_in_cond_jump (void)
/* Iterate over all basic blocks (FOR_EACH_BB is among the lines
   missing from this listing) and inspect each block-ending jump.  */
2657 if (JUMP_P (BB_END (bb)))
2659 struct df_link *link;
2660 for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next)
2661 if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
2663 struct web *web = def2web[DF_REF_ID (link->ref)];
/* spill_temp == 3 means "short web": must be colored, never spilled.  */
2664 web->orig_spill_temp = web->spill_temp = 3;
2669 /* Second top-level function of this file.
2670 Converts the connected web parts to full webs. This means, it allocates
2671 all webs, and initializes all fields, including detecting spill
2672 temporaries. It does not distribute moves to their corresponding webs,
2676 make_webs (struct df *df)
2678 /* First build all the webs itself. They are not related with
/* (The parts_to_webs (df) call belongs here; it is among the lines
   missing from this listing.)  */
2681 /* Now detect spill temporaries to initialize their usable_regs set. */
2682 detect_spill_temps ();
2683 detect_webs_set_in_cond_jump ();
2684 /* And finally relate them to each other, meaning to record all possible
2685 conflicts between webs (see the comment there). */
2686 conflicts_between_webs (df);
2687 detect_remat_webs ();
2688 determine_web_costs ();
2691 /* Distribute moves to the corresponding webs. */
2694 moves_to_webs (struct df *df)
2696 struct df_link *link;
2697 struct move_list *ml;
2699 /* Distribute all moves to their corresponding webs, making sure,
2700 each move is in a web maximally one time (happens on some strange
2702 for (ml = wl_moves; ml; ml = ml->next)
2704 struct move *m = ml->move;
2706 struct move_list *newml;
2711 /* Multiple defs/uses can happen in moves involving hard-regs in
2712 a wider mode. For those df.* creates use/def references for each
2713 real hard-reg involved. For coalescing we are interested in
2714 the smallest numbered hard-reg. */
2715 for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
2718 web = def2web[DF_REF_ID (link->ref)];
2719 web = find_web_for_subweb (web);
2720 if (!m->target_web || web->regno < m->target_web->regno)
2721 m->target_web = web;
2723 for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
2726 web = use2web[DF_REF_ID (link->ref)];
2727 web = find_web_for_subweb (web);
2728 if (!m->source_web || web->regno < m->source_web->regno)
2729 m->source_web = web;
2731 if (m->source_web && m->target_web
2732 /* If the usable_regs don't intersect we can't coalesce the two
2733 webs anyway, as this is no simple copy insn (it might even
2734 need an intermediate stack temp to execute this "copy" insn). */
2735 && hard_regs_intersect_p (&m->source_web->usable_regs,
2736 &m->target_web->usable_regs))
/* With optimistic coalescing the per-web move lists are built
   elsewhere; here only for the conservative variant.  */
2738 if (!flag_ra_optimistic_coalescing)
/* Linear scan to ensure the move isn't already on the list.  */
2740 struct move_list *test = m->source_web->moves;
2741 for (; test && test->move != m; test = test->next);
2744 newml = ra_alloc (sizeof (struct move_list));
2746 newml->next = m->source_web->moves;
2747 m->source_web->moves = newml;
2749 test = m->target_web->moves;
2750 for (; test && test->move != m; test = test->next);
2753 newml = ra_alloc (sizeof (struct move_list));
2755 newml->next = m->target_web->moves;
2756 m->target_web->moves = newml;
2761 /* Delete this move. */
2766 /* Handle tricky asm insns.
2767 Supposed to create conflicts to hardregs which aren't allowed in
2768 the constraints. Doesn't actually do that, as it might confuse
2769 and constrain the allocator too much. */
2772 handle_asm_insn (struct df *df, rtx insn)
2774 const char *constraints[MAX_RECOG_OPERANDS];
2775 enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
2776 int i, noperands, in_output;
2777 HARD_REG_SET clobbered, allowed, conflict;
/* Bail out early for non-asm insns (guard partially elided here).  */
2780 || (noperands = asm_noperands (PATTERN (insn))) < 0)
2782 pat = PATTERN (insn);
2783 CLEAR_HARD_REG_SET (clobbered);
/* Collect all hardregs explicitly clobbered by the asm.  */
2785 if (GET_CODE (pat) == PARALLEL)
2786 for (i = 0; i < XVECLEN (pat, 0); i++)
2788 rtx t = XVECEXP (pat, 0, i);
2789 if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0))
2790 && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
2791 SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
2794 decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
2795 constraints, operand_mode);
/* Process each operand's constraint string independently.  */
2797 for (i = 0; i < noperands; i++)
2799 const char *p = constraints[i];
2800 int cls = (int) NO_REGS;
2801 struct df_link *link;
2804 int nothing_allowed = 1;
2805 reg = recog_data.operand[i];
2807 /* Look, if the constraints apply to a pseudo reg, and not to
2809 while (GET_CODE (reg) == SUBREG
2810 || GET_CODE (reg) == ZERO_EXTRACT
2811 || GET_CODE (reg) == SIGN_EXTRACT
2812 || GET_CODE (reg) == STRICT_LOW_PART)
2813 reg = XEXP (reg, 0);
2814 if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2817 /* Search the web corresponding to this operand. We depend on
2818 that decode_asm_operands() places the output operands
2819 before the input operands. */
2823 link = df->insns[INSN_UID (insn)].defs;
2825 link = df->insns[INSN_UID (insn)].uses;
2826 while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
2828 if (!link || !link->ref)
2830 gcc_assert (in_output);
/* Map the df ref to its web: defs via def2web, uses via use2web
   (branch structure partially elided in this listing).  */
2837 web = def2web[DF_REF_ID (link->ref)];
2839 web = use2web[DF_REF_ID (link->ref)];
2840 reg = DF_REF_REG (link->ref);
2842 /* Find the constraints, noting the allowed hardregs in allowed. */
2843 CLEAR_HARD_REG_SET (allowed);
2848 if (c == '\0' || c == ',' || c == '#')
2850 /* End of one alternative - mark the regs in the current
2851 class, and reset the class. */
2853 IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
2855 nothing_allowed = 0;
2860 } while (c != '\0' && c != ',');
/* The switch over constraint letters follows; these cases neither
   allow nor forbid registers directly.  */
2868 case '=': case '+': case '*': case '%': case '?': case '!':
2869 case '0': case '1': case '2': case '3': case '4': case 'm':
2870 case '<': case '>': case 'V': case 'o': case '&': case 'E':
2871 case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
2872 case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
/* 'p' (address) constraints allow the base register class.  */
2877 cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
2878 nothing_allowed = 0;
/* 'g'/'r' constraints allow all general registers.  */
2883 cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
2884 nothing_allowed = 0;
/* Machine-specific constraint letters.  */
2889 (int) reg_class_subunion[cls][(int)
2890 REG_CLASS_FROM_CONSTRAINT (c,
2893 p += CONSTRAINT_LEN (c, p);
2896 /* Now make conflicts between this web, and all hardregs, which
2897 are not allowed by the constraints. */
2898 if (nothing_allowed)
2900 /* If we had no real constraints nothing was explicitly
2901 allowed, so we allow the whole class (i.e. we make no
2902 additional conflicts). */
2903 CLEAR_HARD_REG_SET (conflict);
/* Otherwise conflict = (preferred ∪ alternate class) minus allowed.  */
2907 COPY_HARD_REG_SET (conflict, usable_regs
2908 [reg_preferred_class (web->regno)]);
2909 IOR_HARD_REG_SET (conflict, usable_regs
2910 [reg_alternate_class (web->regno)]);
2911 AND_COMPL_HARD_REG_SET (conflict, allowed);
2912 /* We can't yet establish these conflicts. Reload must go first
2913 (or better said, we must implement some functionality of reload).
2914 E.g. if some operands must match, and they need the same color
2915 we don't see yet, that they do not conflict (because they match).
2916 For us it looks like two normal references with different DEFs,
2917 so they conflict, and as they both need the same color, the
2918 graph becomes uncolorable. */
2920 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
2921 if (TEST_HARD_REG_BIT (conflict, c))
2922 record_conflict (web, hardreg2web[c]);
/* Debug dump of the conflicts that would have been recorded.  */
2928 ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
2929 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
2930 if (TEST_HARD_REG_BIT (conflict, c))
2931 ra_debug_msg (DUMP_ASM, " %d", c);
2932 ra_debug_msg (DUMP_ASM, "\n");
2937 /* The real toplevel function in this file.
2938 Build (or rebuilds) the complete interference graph with webs
/* ("...and conflicts" — remainder of comment elided in this listing.)  */
2942 build_i_graph (struct df *df)
2946 init_web_parts (df);
2948 sbitmap_zero (move_handled);
2951 build_web_parts_and_conflicts (df);
2953 /* For read-modify-write instructions we may have created two webs.
2954 Reconnect them here. (s.a.) */
2955 connect_rmw_web_parts (df);
2957 /* The webs are conceptually complete now, but still scattered around as
2958 connected web parts. Collect all information and build the webs
2959 including all conflicts between webs (instead web parts). */
/* (make_webs (df) and moves_to_webs (df) calls belong here; elided.)  */
2963 /* Look for additional constraints given by asms. */
2964 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2965 handle_asm_insn (df, insn);
2968 /* Allocates or reallocates most memory for the interference graph and
2969 associated structures. If it reallocates memory (meaning, this is not
2970 the first pass), this also changes some structures to reflect the
2971 additional entries in various array, and the higher number of
2975 ra_build_realloc (struct df *df)
/* Keep the previous pass's arrays so their contents can be migrated
   into the freshly allocated ones below.  */
2977 struct web_part *last_web_parts = web_parts;
2978 struct web **last_def2web = def2web;
2979 struct web **last_use2web = use2web;
2980 sbitmap last_live_over_abnormal = live_over_abnormal;
2983 move_handled = sbitmap_alloc (get_max_uid () );
/* One combined array: defs in [0, def_id), uses in [def_id, ...).  */
2984 web_parts = xcalloc (df->def_id + df->use_id, sizeof web_parts[0]);
2985 def2web = xcalloc (df->def_id + df->use_id, sizeof def2web[0]);
2986 use2web = &def2web[df->def_id];
2987 live_over_abnormal = sbitmap_alloc (df->use_id);
2988 sbitmap_zero (live_over_abnormal);
2990 /* First go through all old defs and uses. */
2991 for (i = 0; i < last_def_id + last_use_id; i++)
2993 /* And relocate them to the new array. This is made ugly by the
2994 fact, that defs and uses are placed consecutive into one array. */
2995 struct web_part *dest = &web_parts[i < last_def_id
2996 ? i : (df->def_id + i - last_def_id)];
2997 struct web_part *up;
2998 *dest = last_web_parts[i];
3000 dest->uplink = NULL;
3002 /* Also relocate the uplink to point into the new array. */
/* Def uplinks keep their index; use uplinks shift by the new def_id.  */
3005 unsigned int id = DF_REF_ID (up->ref);
3006 if (up < &last_web_parts[last_def_id])
3009 dest->uplink = &web_parts[DF_REF_ID (up->ref)];
3011 else if (df->uses[id])
3012 dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
3016 /* Also set up the def2web and use2web arrays, from the last pass.
3017 Remember also the state of live_over_abnormal. */
3018 for (i = 0; i < last_def_id; i++)
3020 struct web *web = last_def2web[i];
3023 web = find_web_for_subweb (web);
/* Only keep mappings to webs that still exist.  */
3024 if (web->type != FREE && web->type != PRECOLORED)
3025 def2web[i] = last_def2web[i];
3028 for (i = 0; i < last_use_id; i++)
3030 struct web *web = last_use2web[i];
3033 web = find_web_for_subweb (web);
3034 if (web->type != FREE && web->type != PRECOLORED)
3035 use2web[i] = last_use2web[i];
3037 if (TEST_BIT (last_live_over_abnormal, i))
3038 SET_BIT (live_over_abnormal, i);
3041 /* We don't have any subwebs for now. Somewhen we might want to
3042 remember them too, instead of recreating all of them every time.
3043 The problem is, that which subwebs we need, depends also on what
3044 other webs and subwebs exist, and which conflicts are there.
3045 OTOH it should be no problem, if we had some more subwebs than strictly
/* Release the subweb chains of freed webs.  */
3047 for (d = WEBS(FREE); d; d = d->next)
3049 struct web *web = DLIST_WEB (d);
3051 for (web = web->subreg_next; web; web = wnext)
3053 wnext = web->subreg_next;
3056 DLIST_WEB (d)->subreg_next = NULL;
3059 /* The uses we anyway are going to check, are not yet live over an abnormal
3060 edge. In fact, they might actually not anymore, due to added
3062 if (last_check_uses)
3063 sbitmap_difference (live_over_abnormal, live_over_abnormal,
/* Free the previous pass's arrays now that migration is done.  */
3066 if (last_def_id || last_use_id)
3068 sbitmap_free (last_live_over_abnormal);
3069 free (last_web_parts);
3070 free (last_def2web);
3074 /* Setup copy cache, for copy_insn_p (). */
3075 copy_cache = xcalloc (get_max_uid (), sizeof (copy_cache[0]));
/* On later passes grow the cache and zero only the new tail.  */
3080 copy_cache = xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
3081 memset (&copy_cache[last_max_uid], 0,
3082 (get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
3086 /* Free up/clear some memory, only needed for one pass. */
3089 ra_build_free (void)
3094 /* Clear the moves associated with a web (we also need to look into
3096 for (i = 0; i < num_webs; i++)
3098 struct web *web = ID2WEB (i);
/* Subwebs (indices >= num_webs - num_subwebs) must not own
   conflict lists of their own.  */
3100 gcc_assert (i < num_webs - num_subwebs
3101 || (!web->conflict_list && !web->orig_conflict_list));
3104 /* All webs in the free list have no defs or uses anymore. */
3105 for (d = WEBS(FREE); d; d = d->next)
3107 struct web *web = DLIST_WEB (d);
3114 /* We can't free the subwebs here, as they are referenced from
3115 def2web[], and possibly needed in the next ra_build_realloc().
3116 We free them there (or in free_all_mem()). */
3119 /* Free all conflict bitmaps from web parts. Note that we clear
3120 _all_ these conflicts, and don't rebuild them next time for uses
3121 which aren't rechecked. This mean, that those conflict bitmaps
3122 only contain the incremental information. The cumulative one
3123 is still contained in the edges of the I-graph, i.e. in
3124 conflict_list (or orig_conflict_list) of the webs. */
3125 for (i = 0; i < df->def_id + df->use_id; i++)
3127 struct tagged_conflict *cl;
3128 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3131 BITMAP_XFREE (cl->conflicts);
3133 web_parts[i].sub_conflicts = NULL;
/* Release the per-pass sbitmaps.  */
3139 free (move_handled);
3140 sbitmap_free (sup_igraph);
3141 sbitmap_free (igraph);
3144 /* Free all memory for the interference graph structures. */
3147 ra_build_free_all (struct df *df)
/* Same bitmap cleanup as in ra_build_free(), but unconditional:
   everything is torn down at the end of allocation.  */
3154 for (i = 0; i < df->def_id + df->use_id; i++)
3156 struct tagged_conflict *cl;
3157 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3160 BITMAP_XFREE (cl->conflicts);
3162 web_parts[i].sub_conflicts = NULL;
3164 sbitmap_free (live_over_abnormal);
3167 if (last_check_uses)
3168 sbitmap_free (last_check_uses);
/* Null the pointer so a later pass doesn't double-free it.  */
3169 last_check_uses = NULL;
3175 #include "gt-ra-build.h"
3178 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: