/* Graph coloring register allocator
- Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>
struct curr_use;
-static unsigned HOST_WIDE_INT rtx_to_undefined PARAMS ((rtx));
-static bitmap find_sub_conflicts PARAMS ((struct web_part *, unsigned int));
-static bitmap get_sub_conflicts PARAMS ((struct web_part *, unsigned int));
-static unsigned int undef_to_size_word PARAMS ((rtx, unsigned HOST_WIDE_INT *));
-static bitmap undef_to_bitmap PARAMS ((struct web_part *,
- unsigned HOST_WIDE_INT *));
-static struct web_part * find_web_part_1 PARAMS ((struct web_part *));
+static unsigned HOST_WIDE_INT rtx_to_undefined (rtx);
+static bitmap find_sub_conflicts (struct web_part *, unsigned int);
+static bitmap get_sub_conflicts (struct web_part *, unsigned int);
+static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *);
+static bitmap undef_to_bitmap (struct web_part *,
+ unsigned HOST_WIDE_INT *);
+static struct web_part * find_web_part_1 (struct web_part *);
static struct web_part * union_web_part_roots
- PARAMS ((struct web_part *, struct web_part *));
-static int defuse_overlap_p_1 PARAMS ((rtx, struct curr_use *));
-static int live_out_1 PARAMS ((struct df *, struct curr_use *, rtx));
-static int live_out PARAMS ((struct df *, struct curr_use *, rtx));
-static rtx live_in_edge PARAMS (( struct df *, struct curr_use *, edge));
-static void live_in PARAMS ((struct df *, struct curr_use *, rtx));
-static int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
-static void remember_move PARAMS ((rtx));
-static void handle_asm_insn PARAMS ((struct df *, rtx));
-static void prune_hardregs_for_mode PARAMS ((HARD_REG_SET *,
- enum machine_mode));
-static void init_one_web_common PARAMS ((struct web *, rtx));
-static void init_one_web PARAMS ((struct web *, rtx));
-static void reinit_one_web PARAMS ((struct web *, rtx));
-static struct web * add_subweb PARAMS ((struct web *, rtx));
-static struct web * add_subweb_2 PARAMS ((struct web *, unsigned int));
-static void init_web_parts PARAMS ((struct df *));
-static void copy_conflict_list PARAMS ((struct web *));
-static void add_conflict_edge PARAMS ((struct web *, struct web *));
-static void build_inverse_webs PARAMS ((struct web *));
-static void copy_web PARAMS ((struct web *, struct web_link **));
-static void compare_and_free_webs PARAMS ((struct web_link **));
-static void init_webs_defs_uses PARAMS ((void));
-static unsigned int parts_to_webs_1 PARAMS ((struct df *, struct web_link **,
- struct df_link *));
-static void parts_to_webs PARAMS ((struct df *));
-static void reset_conflicts PARAMS ((void));
+ (struct web_part *, struct web_part *);
+static int defuse_overlap_p_1 (rtx, struct curr_use *);
+static int live_out_1 (struct df *, struct curr_use *, rtx);
+static int live_out (struct df *, struct curr_use *, rtx);
+static rtx live_in_edge (struct df *, struct curr_use *, edge);
+static void live_in (struct df *, struct curr_use *, rtx);
+static int copy_insn_p (rtx, rtx *, rtx *);
+static void remember_move (rtx);
+static void handle_asm_insn (struct df *, rtx);
+static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode);
+static void init_one_web_common (struct web *, rtx);
+static void init_one_web (struct web *, rtx);
+static void reinit_one_web (struct web *, rtx);
+static struct web * add_subweb (struct web *, rtx);
+static struct web * add_subweb_2 (struct web *, unsigned int);
+static void init_web_parts (struct df *);
+static void copy_conflict_list (struct web *);
+static void add_conflict_edge (struct web *, struct web *);
+static void build_inverse_webs (struct web *);
+static void copy_web (struct web *, struct web_link **);
+static void compare_and_free_webs (struct web_link **);
+static void init_webs_defs_uses (void);
+static unsigned int parts_to_webs_1 (struct df *, struct web_link **,
+ struct df_link *);
+static void parts_to_webs (struct df *);
+static void reset_conflicts (void);
#if 0
-static void check_conflict_numbers PARAMS ((void));
+static void check_conflict_numbers (void);
#endif
-static void conflicts_between_webs PARAMS ((struct df *));
-static void remember_web_was_spilled PARAMS ((struct web *));
-static void detect_spill_temps PARAMS ((void));
-static int contains_pseudo PARAMS ((rtx));
-static int want_to_remat PARAMS ((rtx x));
-static void detect_remat_webs PARAMS ((void));
-static void determine_web_costs PARAMS ((void));
-static void detect_webs_set_in_cond_jump PARAMS ((void));
-static void make_webs PARAMS ((struct df *));
-static void moves_to_webs PARAMS ((struct df *));
-static void connect_rmw_web_parts PARAMS ((struct df *));
-static void update_regnos_mentioned PARAMS ((void));
-static void livethrough_conflicts_bb PARAMS ((basic_block));
-static void init_bb_info PARAMS ((void));
-static void free_bb_info PARAMS ((void));
-static void build_web_parts_and_conflicts PARAMS ((struct df *));
+static void conflicts_between_webs (struct df *);
+static void remember_web_was_spilled (struct web *);
+static void detect_spill_temps (void);
+static int contains_pseudo (rtx);
+static int want_to_remat (rtx);
+static void detect_remat_webs (void);
+static void determine_web_costs (void);
+static void detect_webs_set_in_cond_jump (void);
+static void make_webs (struct df *);
+static void moves_to_webs (struct df *);
+static void connect_rmw_web_parts (struct df *);
+static void update_regnos_mentioned (void);
+static void livethrough_conflicts_bb (basic_block);
+static void init_bb_info (void);
+static void free_bb_info (void);
+static void build_web_parts_and_conflicts (struct df *);
/* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
edge. */
/* X is a REG or SUBREG rtx. Return its begin byte and length, encoded
as an integer. */
unsigned int
-rtx_to_bits (x)
- rtx x;
+rtx_to_bits (rtx x)
{
unsigned int len, beg;
len = GET_MODE_SIZE (GET_MODE (x));
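/* (Illustration, assuming ra.h's BL_TO_WORD(beg, len) encoding,
i.e. (beg << 16) | len: for (subreg:SI (reg:DI d) 4) we'd get
len = 4 and beg = 4, packed into one returned integer.) */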
/* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
static unsigned HOST_WIDE_INT
-rtx_to_undefined (x)
- rtx x;
+rtx_to_undefined (rtx x)
{
unsigned int len, beg;
unsigned HOST_WIDE_INT ret;
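/* (Illustration: with len bytes starting at byte beg, the mask is
presumably ((1 << len) - 1) << beg, so (subreg:SI (reg:DI d) 4)
touches bytes 4..7 and yields 0xf0.) */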
not NULL. */
static int
-copy_insn_p (insn, source, target)
- rtx insn;
- rtx *source;
- rtx *target;
+copy_insn_p (rtx insn, rtx *source, rtx *target)
{
rtx d, s;
unsigned int d_regno, s_regno;
coalescing (the check for this is in remember_move() below). */
while (GET_CODE (d) == STRICT_LOW_PART)
d = XEXP (d, 0);
- if (GET_CODE (d) != REG
- && (GET_CODE (d) != SUBREG || GET_CODE (SUBREG_REG (d)) != REG))
+ if (!REG_P (d)
+ && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d))))
return 0;
while (GET_CODE (s) == STRICT_LOW_PART)
s = XEXP (s, 0);
- if (GET_CODE (s) != REG
- && (GET_CODE (s) != SUBREG || GET_CODE (SUBREG_REG (s)) != REG))
+ if (!REG_P (s)
+ && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s))))
return 0;
s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
exist yet in WP. */
static bitmap
-find_sub_conflicts (wp, size_word)
- struct web_part *wp;
- unsigned int size_word;
+find_sub_conflicts (struct web_part *wp, unsigned int size_word)
{
struct tagged_conflict *cl;
cl = wp->sub_conflicts;
doesn't exist. I.e. this never returns NULL. */
static bitmap
-get_sub_conflicts (wp, size_word)
- struct web_part *wp;
- unsigned int size_word;
+get_sub_conflicts (struct web_part *wp, unsigned int size_word)
{
bitmap b = find_sub_conflicts (wp, size_word);
if (!b)
{
- struct tagged_conflict *cl =
- (struct tagged_conflict *) ra_alloc (sizeof *cl);
+ struct tagged_conflict *cl = ra_alloc (sizeof *cl);
cl->conflicts = BITMAP_XMALLOC ();
cl->size_word = size_word;
cl->next = wp->sub_conflicts;
*/
static unsigned int
-undef_to_size_word (reg, undefined)
- rtx reg;
- unsigned HOST_WIDE_INT *undefined;
+undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined)
{
/* When only the lower four bits are possibly set, we use
a fast lookup table. */
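/* (Illustration: each bit in *undefined stands for one undefined byte.
A contiguous mask like 0xc maps to a single chunk, byte 2 of size 2,
leaving *undefined zero; a non-contiguous mask like 0x9 can only hand
back one of its single-byte chunks per call, leaving the other bit
set in *undefined for a further round.) */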
covered by the part for that bitmap. */
static bitmap
-undef_to_bitmap (wp, undefined)
- struct web_part *wp;
- unsigned HOST_WIDE_INT *undefined;
+undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined)
{
unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
undefined);
it compresses the path. P may not be NULL. */
static struct web_part *
-find_web_part_1 (p)
- struct web_part *p;
+find_web_part_1 (struct web_part *p)
{
struct web_part *r = p;
struct web_part *p_next;
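/* (Sketch: given an uplink chain a -> b -> c -> root, this returns
root and rewrites the chain to a -> root, b -> root, c -> root, so
later lookups are nearly O(1).) */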
The root of the resulting (possibly larger) web part is returned. */
static struct web_part *
-union_web_part_roots (r1, r2)
- struct web_part *r1, *r2;
+union_web_part_roots (struct web_part *r1, struct web_part *r2)
{
if (r1 != r2)
{
/* Remember that we've handled a given move, so we don't reprocess it. */
static void
-remember_move (insn)
- rtx insn;
+remember_move (rtx insn)
{
if (!TEST_BIT (move_handled, INSN_UID (insn)))
{
Those would be difficult to coalesce (we would need to implement
handling of all the subwebs in the allocator, including that such
subwebs could be source and target of coalescing). */
- if (GET_CODE (s) == REG && GET_CODE (d) == REG)
+ if (REG_P (s) && REG_P (d))
{
- struct move *m = (struct move *) ra_calloc (sizeof (struct move));
+ struct move *m = ra_calloc (sizeof (struct move));
struct move_list *ml;
m->insn = insn;
- ml = (struct move_list *) ra_alloc (sizeof (struct move_list));
+ ml = ra_alloc (sizeof (struct move_list));
ml->move = m;
ml->next = wl_moves;
wl_moves = ml;
4 if both are SUBREGs of different size, but have bytes in common.
-1 is a special case, for when DEF and USE refer to the same regno, but
for other reasons have no bits in common (can only happen with
- subregs refering to different words, or to words which already were
+ subregs referring to different words, or to words which already were
defined for this USE).
Furthermore it modifies use->undefined to clear the bits which get defined
by DEF (only for cases with partial overlap).
otherwise a test is needed to track the already defined bytes. */
static int
-defuse_overlap_p_1 (def, use)
- rtx def;
- struct curr_use *use;
+defuse_overlap_p_1 (rtx def, struct curr_use *use)
{
int mode = 0;
if (def == use->x)
if they refer to the same word. */
if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
return 1;
- /* Now the more difficult part: the same regno is refered, but the
+ /* Now the more difficult part: the same regno is referred to, but the
sizes of the references or the words differ. E.g.
(subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
this insn. */
static int
-live_out_1 (df, use, insn)
- struct df *df ATTRIBUTE_UNUSED;
- struct curr_use *use;
- rtx insn;
+live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
{
int defined = 0;
int uid = INSN_UID (insn);
/* We want to access the root webpart. */
wp = find_web_part (wp);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
wp->crosses_call = 1;
else if (copy_insn_p (insn, &s, NULL))
source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
this insn). */
static inline int
-live_out (df, use, insn)
- struct df *df;
- struct curr_use *use;
- rtx insn;
+live_out (struct df *df, struct curr_use *use, rtx insn)
{
unsigned int uid = INSN_UID (insn);
if (visit_trace[uid].wp
which uses are live at the end of that basic block. */
static rtx
-live_in_edge (df, use, e)
- struct df *df;
- struct curr_use *use;
- edge e;
+live_in_edge (struct df *df, struct curr_use *use, edge e)
{
struct ra_bb_info *info_pred;
rtx next_insn;
use->live_over_abnormal = 1;
bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
info_pred = (struct ra_bb_info *) e->src->aux;
- next_insn = e->src->end;
+ next_insn = BB_END (e->src);
/* If the last insn of the pred. block doesn't completely define the
current use, we need to check the block. */
creation to later. */
bitmap_set_bit (info_pred->live_throughout,
DF_REF_ID (use->wp->ref));
- next_insn = e->src->head;
+ next_insn = BB_HEAD (e->src);
}
return next_insn;
}
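/* (Summary, as the fragments suggest: the use is noted as live at the
predecessor's end; scanning normally resumes at BB_END (e->src), but
if the block cannot affect the use at all, i.e. the regno is not
mentioned there, the use is recorded in live_throughout and the scan
skips straight to BB_HEAD.) */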
def-use chains, and all defs during that chain are noted. */
static void
-live_in (df, use, insn)
- struct df *df;
- struct curr_use *use;
- rtx insn;
+live_in (struct df *df, struct curr_use *use, rtx insn)
{
unsigned int loc_vpass = visited_pass;
pass. */
static void
-update_regnos_mentioned ()
+update_regnos_mentioned (void)
{
int last_uid = last_max_uid;
rtx insn;
spanned_deaths members. */
static void
-livethrough_conflicts_bb (bb)
- basic_block bb;
+livethrough_conflicts_bb (basic_block bb)
{
struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
rtx insn;
/* First collect the IDs of all defs, count the number of
death-containing insns, and note whether there's a call_insn here. */
all_defs = BITMAP_XMALLOC ();
- for (insn = bb->head; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
{
bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
deaths++;
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
contains_call = 1;
}
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
/* And now, if we have found anything, make all live_through
uses conflict with all defs, and update their other members. */
- if (deaths > 0 || bitmap_first_set_bit (all_defs) >= 0)
+ if (deaths > 0
+ || contains_call
+ || bitmap_first_set_bit (all_defs) >= 0)
EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
{
struct web_part *wp = &web_parts[df->def_id + use_id];
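/* (Uses are stored behind all defs in the web_parts[] array, hence
the df->def_id + use_id index; cf. use2web = &def2web[df->def_id]
in ra_build_realloc() below.) */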
building live ranges. */
static void
-init_bb_info ()
+init_bb_info (void)
{
basic_block bb;
FOR_ALL_BB (bb)
{
- struct ra_bb_info *info =
- (struct ra_bb_info *) xcalloc (1, sizeof *info);
+ struct ra_bb_info *info = xcalloc (1, sizeof *info);
info->regnos_mentioned = BITMAP_XMALLOC ();
info->live_throughout = BITMAP_XMALLOC ();
info->old_aux = bb->aux;
/* Free that per basic block info. */
static void
-free_bb_info ()
+free_bb_info (void)
{
basic_block bb;
FOR_ALL_BB (bb)
their conflicts. */
static void
-build_web_parts_and_conflicts (df)
- struct df *df;
+build_web_parts_and_conflicts (struct df *df)
{
struct df_link *link;
struct curr_use use;
basic_block bb;
- number_seen = (int *) xcalloc (get_max_uid (), sizeof (int));
- visit_trace = (struct visit_trace *) xcalloc (get_max_uid (),
- sizeof (visit_trace[0]));
+ number_seen = xcalloc (get_max_uid (), sizeof (int));
+ visit_trace = xcalloc (get_max_uid (), sizeof (visit_trace[0]));
update_regnos_mentioned ();
/* Here's the main loop.
read-mod-write instruction), so we must reconnect such webs. */
static void
-connect_rmw_web_parts (df)
- struct df *df;
+connect_rmw_web_parts (struct df *df)
{
unsigned int i;
/* Deletes all hardregs from *S which are not allowed for MODE. */
static void
-prune_hardregs_for_mode (s, mode)
- HARD_REG_SET *s;
- enum machine_mode mode;
+prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
{
AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
}
/* Initialize the members of a web, which are deducible from REG. */
static void
-init_one_web_common (web, reg)
- struct web *web;
- rtx reg;
+init_one_web_common (struct web *web, rtx reg)
{
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
abort ();
/* web->id isn't initialized here. */
web->regno = REGNO (reg);
web->orig_x = reg;
if (!web->dlink)
{
- web->dlink = (struct dlist *) ra_calloc (sizeof (struct dlist));
+ web->dlink = ra_calloc (sizeof (struct dlist));
DLIST_WEB (web->dlink) = web;
}
/* XXX
AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
prune_hardregs_for_mode (&web->usable_regs,
PSEUDO_REGNO_MODE (web->regno));
-#ifdef CLASS_CANNOT_CHANGE_MODE
+#ifdef CANNOT_CHANGE_MODE_CLASS
if (web->mode_changed)
- AND_COMPL_HARD_REG_SET (web->usable_regs, reg_class_contents[
- (int) CLASS_CANNOT_CHANGE_MODE]);
+ AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
#endif
web->num_freedom = hard_regs_count (web->usable_regs);
web->num_freedom -= web->add_hardregs;
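/* (num_freedom is thus the number of colors actually available to
this web; add_hardregs is presumably the number of additional
consecutive hardregs a multi-word pseudo needs beyond its first.) */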
/* Initializes WEBs members from REG or zero them. */
static void
-init_one_web (web, reg)
- struct web *web;
- rtx reg;
+init_one_web (struct web *web, rtx reg)
{
memset (web, 0, sizeof (struct web));
init_one_web_common (web, reg);
members. */
static void
-reinit_one_web (web, reg)
- struct web *web;
- rtx reg;
+reinit_one_web (struct web *web, rtx reg)
{
web->old_color = web->color + 1;
init_one_web_common (web, reg);
web->artificial = 0;
web->live_over_abnormal = 0;
web->mode_changed = 0;
+ web->subreg_stripped = 0;
web->move_related = 0;
web->in_load = 0;
web->target_of_spilled_move = 0;
becomes its super web). It must not exist already. */
static struct web *
-add_subweb (web, reg)
- struct web *web;
- rtx reg;
+add_subweb (struct web *web, rtx reg)
{
struct web *w;
if (GET_CODE (reg) != SUBREG)
abort ();
- w = (struct web *) xmalloc (sizeof (struct web));
+ w = xmalloc (sizeof (struct web));
/* Copy most content from parent-web. */
*w = *web;
/* And initialize the private stuff. */
Unlike add_subweb(), this marks the new subweb as artificial. */
static struct web *
-add_subweb_2 (web, size_word)
- struct web *web;
- unsigned int size_word;
+add_subweb_2 (struct web *web, unsigned int size_word)
{
/* To get a correct mode for the subreg to be produced, we don't want to
simply do a mode_for_size() for the mode_class of the whole web.
/* Initialize all the web parts we are going to need. */
static void
-init_web_parts (df)
- struct df *df;
+init_web_parts (struct df *df)
{
int regno;
unsigned int no;
num_webs++;
}
else
- /* The last iteration might have left .ref set, while df_analyse()
+ /* The last iteration might have left .ref set, while df_analyze()
removed that ref (due to a removed copy insn) from the df->defs[]
array. As we don't check for that in realloc_web_parts()
we do that here. */
new conflicts, we copy it here to orig_conflict_list. */
static void
-copy_conflict_list (web)
- struct web *web;
+copy_conflict_list (struct web *web)
{
struct conflict_link *cl;
if (web->orig_conflict_list || web->have_orig_conflicts)
for (cl = web->conflict_list; cl; cl = cl->next)
{
struct conflict_link *ncl;
- ncl = (struct conflict_link *) ra_alloc (sizeof *ncl);
+ ncl = ra_alloc (sizeof *ncl);
ncl->t = cl->t;
ncl->sub = NULL;
ncl->next = web->orig_conflict_list;
struct sub_conflict *sl, *nsl;
for (sl = cl->sub; sl; sl = sl->next)
{
- nsl = (struct sub_conflict *) ra_alloc (sizeof *nsl);
+ nsl = ra_alloc (sizeof *nsl);
nsl->s = sl->s;
nsl->t = sl->t;
nsl->next = ncl->sub;
happen, if SUBREG webs are involved. */
static void
-add_conflict_edge (from, to)
- struct web *from, *to;
+add_conflict_edge (struct web *from, struct web *to)
{
if (from->type != PRECOLORED)
{
copy_conflict_list (pfrom);
if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
{
- cl = (struct conflict_link *) ra_alloc (sizeof (*cl));
+ cl = ra_alloc (sizeof (*cl));
cl->t = pto;
cl->sub = NULL;
cl->next = pfrom->conflict_list;
means we are not interested in this subconflict. */
if (!may_delete || cl->sub != NULL)
{
- sl = (struct sub_conflict *) ra_alloc (sizeof (*sl));
+ sl = ra_alloc (sizeof (*sl));
sl->s = from;
sl->t = to;
sl->next = cl->sub;
already. */
void
-record_conflict (web1, web2)
- struct web *web1, *web2;
+record_conflict (struct web *web1, struct web *web2)
{
unsigned int id1 = web1->id, id2 = web2->id;
unsigned int index = igraph_index (id1, id2);
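/* (igraph_index presumably linearizes the unordered pair into the
lower-triangular half of the interference matrix, e.g. as
max*(max-1)/2 + min, so (id1,id2) and (id2,id1) share one bit.) */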
possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
static void
-build_inverse_webs (web)
- struct web *web;
+build_inverse_webs (struct web *web)
{
struct web *sweb = web->subreg_next;
unsigned HOST_WIDE_INT undef;
Used for consistency checking. */
static void
-copy_web (web, wl)
- struct web *web;
- struct web_link **wl;
+copy_web (struct web *web, struct web_link **wl)
{
- struct web *cweb = (struct web *) xmalloc (sizeof *cweb);
- struct web_link *link = (struct web_link *) ra_alloc (sizeof *link);
+ struct web *cweb = xmalloc (sizeof *cweb);
+ struct web_link *link = ra_alloc (sizeof *link);
link->next = *wl;
*wl = link;
link->web = cweb;
with the global webs of the same ID. For consistency checking. */
static void
-compare_and_free_webs (link)
- struct web_link **link;
+compare_and_free_webs (struct web_link **link)
{
struct web_link *wl;
for (wl = *link; wl; wl = wl->next)
&& (web1->num_uses != web2->num_uses
|| web1->num_defs != web2->num_defs))
/* Similarly, if the framepointer was unreferenced originally
- but we added spills, these fields may not match. */
+ but we added spills, these fields may not match. */
|| (web1->type != PRECOLORED
&& web1->crosses_call != web2->crosses_call)
|| (web1->type != PRECOLORED
/* Setup and fill uses[] and defs[] arrays of the webs. */
static void
-init_webs_defs_uses ()
+init_webs_defs_uses (void)
{
struct dlist *d;
for (d = WEBS(INITIAL); d; d = d->next)
continue;
}
if (web->num_defs)
- web->defs = (struct ref **) xmalloc (web->num_defs *
- sizeof (web->defs[0]));
+ web->defs = xmalloc (web->num_defs * sizeof (web->defs[0]));
if (web->num_uses)
- web->uses = (struct ref **) xmalloc (web->num_uses *
- sizeof (web->uses[0]));
+ web->uses = xmalloc (web->num_uses * sizeof (web->uses[0]));
def_i = use_i = 0;
for (link = web->temp_refs; link; link = link->next)
{
up use2web and def2web arrays. */
static unsigned int
-parts_to_webs_1 (df, copy_webs, all_refs)
- struct df *df;
- struct web_link **copy_webs;
- struct df_link *all_refs;
+parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
+ struct df_link *all_refs)
{
unsigned int i;
unsigned int webnum;
allocate a new one. */
if (ra_pass == 1)
{
- web = (struct web *) xmalloc (sizeof (struct web));
+ web = xmalloc (sizeof (struct web));
newid = last_num_webs++;
init_one_web (web, GET_CODE (reg) == SUBREG
? SUBREG_REG (reg) : reg);
else
{
/* Else allocate a new one. */
- web = (struct web *) xmalloc (sizeof (struct web));
+ web = xmalloc (sizeof (struct web));
newid = last_num_webs++;
}
}
if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
&& web->regno >= FIRST_PSEUDO_REGISTER)
web->mode_changed = 1;
+ if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
+ && web->regno >= FIRST_PSEUDO_REGISTER)
+ web->subreg_stripped = 1;
if (i >= def_id
&& TEST_BIT (live_over_abnormal, ref_id))
web->live_over_abnormal = 1;
if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
&& web->regno >= FIRST_PSEUDO_REGISTER)
web->mode_changed = 1;
+ if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
+ && web->regno >= FIRST_PSEUDO_REGISTER)
+ web->subreg_stripped = 1;
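/* (DF_REF_STRIPPED presumably marks refs the df scanner recorded with
an enclosing SUBREG stripped away, so such webs must be treated more
conservatively, analogous to mode_changed above.) */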
/* Setup def2web, or use2web, and increment num_defs or num_uses. */
if (i < def_id)
other (i.e. without creating the conflict edges). */
static void
-parts_to_webs (df)
- struct df *df;
+parts_to_webs (struct df *df)
{
unsigned int i;
unsigned int webnum;
num_subwebs = 0;
/* First build webs and ordinary subwebs. */
- all_refs = (struct df_link *) xcalloc (df->def_id + df->use_id,
- sizeof (all_refs[0]));
+ all_refs = xcalloc (df->def_id + df->use_id, sizeof (all_refs[0]));
webnum = parts_to_webs_1 (df, &copy_webs, all_refs);
/* Setup the webs for hardregs which are still missing (weren't
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (!hardreg2web[i])
{
- struct web *web = (struct web *) xmalloc (sizeof (struct web));
+ struct web *web = xmalloc (sizeof (struct web));
init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
web->id = last_num_webs++;
hardreg2web[web->regno] = web;
}
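/* (Every hard register thus gets a precolored web even when it was
never referenced, so conflicts with hardregs can be recorded
uniformly through hardreg2web[].) */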
/* Now that everyone has an ID, we can setup the id2web array. */
- id2web = (struct web **) xcalloc (webnum, sizeof (id2web[0]));
+ id2web = xcalloc (webnum, sizeof (id2web[0]));
for (d = WEBS(INITIAL); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
conflicts. */
static void
-reset_conflicts ()
+reset_conflicts (void)
{
unsigned int i;
bitmap newwebs = BITMAP_XMALLOC ();
#if 0
static void
-check_conflict_numbers ()
+check_conflict_numbers (void)
{
unsigned int i;
for (i = 0; i < num_webs; i++)
in reality conflict get the same color. */
static void
-conflicts_between_webs (df)
- struct df *df;
+conflicts_between_webs (struct df *df)
{
unsigned int i;
#ifdef STACK_REGS
#endif
bitmap ignore_defs = BITMAP_XMALLOC ();
unsigned int have_ignored;
- unsigned int *pass_cache = (unsigned int *) xcalloc (num_webs, sizeof (int));
+ unsigned int *pass_cache = xcalloc (num_webs, sizeof (int));
unsigned int pass = 0;
if (ra_pass > 1)
accordingly. */
static void
-remember_web_was_spilled (web)
- struct web *web;
+remember_web_was_spilled (struct web *web)
{
int i;
unsigned int found_size = 0;
reg_class_contents[(int) GENERAL_REGS]);
AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
-#ifdef CLASS_CANNOT_CHANGE_MODE
+#ifdef CANNOT_CHANGE_MODE_CLASS
if (web->mode_changed)
- AND_COMPL_HARD_REG_SET (web->usable_regs, reg_class_contents[
- (int) CLASS_CANNOT_CHANGE_MODE]);
+ AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
#endif
web->num_freedom = hard_regs_count (web->usable_regs);
if (!web->num_freedom)
if it will be spillable in this pass. */
static void
-detect_spill_temps ()
+detect_spill_temps (void)
{
struct dlist *d;
bitmap already = BITMAP_XMALLOC ();
continue;
/* A web with only defs and no uses can't be spilled. Nevertheless
- it must get a color, as it takes away an register from all webs
+ it must get a color, as it takes away a register from all webs
live at these defs. So we make it a short web. */
if (web->num_uses == 0)
web->spill_temp = 3;
/* Returns nonzero if the rtx MEM refers somehow to a stack location. */
int
-memref_is_stack_slot (mem)
- rtx mem;
+memref_is_stack_slot (rtx mem)
{
rtx ad = XEXP (mem, 0);
rtx x;
/* Returns nonzero, if rtx X somewhere contains any pseudo register. */
static int
-contains_pseudo (x)
- rtx x;
+contains_pseudo (rtx x)
{
const char *fmt;
int i;
if (GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
{
if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
return 1;
static GTY(()) rtx remat_test_insn;
static int
-want_to_remat (x)
- rtx x;
+want_to_remat (rtx x)
{
int num_clobbers = 0;
int icode;
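/* (Sketch: X is presumably plugged into the cached dummy SET insn
remat_test_insn, and recog() decides whether some insn pattern,
possibly with num_clobbers added clobbers, could compute X in a
single instruction.) */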
and that value is simple enough, and want_to_remat() holds for it. */
static void
-detect_remat_webs ()
+detect_remat_webs (void)
{
struct dlist *d;
for (d = WEBS(INITIAL); d; d = d->next)
we created them ourselves. They might not have their unchanging
flag set, but nevertheless they are stable across the lifetime in
question. */
- || (GET_CODE (src) == MEM
+ || (MEM_P (src)
&& INSN_UID (insn) >= orig_max_uid
&& memref_is_stack_slot (src)))
/* And we must be able to construct an insn without
/* Determine the spill costs of all webs. */
static void
-determine_web_costs ()
+determine_web_costs (void)
{
struct dlist *d;
for (d = WEBS(INITIAL); d; d = d->next)
which destroys the CFG. (Someday we want to deal with that XXX) */
static void
-detect_webs_set_in_cond_jump ()
+detect_webs_set_in_cond_jump (void)
{
basic_block bb;
FOR_EACH_BB (bb)
- if (GET_CODE (bb->end) == JUMP_INSN)
+ if (JUMP_P (BB_END (bb)))
{
struct df_link *link;
- for (link = DF_INSN_DEFS (df, bb->end); link; link = link->next)
+ for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next)
if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
{
struct web *web = def2web[DF_REF_ID (link->ref)];
though. */
static void
-make_webs (df)
- struct df *df;
+make_webs (struct df *df)
{
/* First build all the webs themselves. They are not related to
others yet. */
/* Distribute moves to the corresponding webs. */
static void
-moves_to_webs (df)
- struct df *df;
+moves_to_webs (struct df *df)
{
struct df_link *link;
struct move_list *ml;
for (; test && test->move != m; test = test->next);
if (! test)
{
- newml = (struct move_list*)
- ra_alloc (sizeof (struct move_list));
+ newml = ra_alloc (sizeof (struct move_list));
newml->move = m;
newml->next = m->source_web->moves;
m->source_web->moves = newml;
for (; test && test->move != m; test = test->next);
if (! test)
{
- newml = (struct move_list*)
- ra_alloc (sizeof (struct move_list));
+ newml = ra_alloc (sizeof (struct move_list));
newml->move = m;
newml->next = m->target_web->moves;
m->target_web->moves = newml;
and constrain the allocator too much. */
static void
-handle_asm_insn (df, insn)
- struct df *df;
- rtx insn;
+handle_asm_insn (struct df *df, rtx insn)
{
const char *constraints[MAX_RECOG_OPERANDS];
enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx t = XVECEXP (pat, 0, i);
- if (GET_CODE (t) == CLOBBER && GET_CODE (XEXP (t, 0)) == REG
+ if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0))
&& REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
}
|| GET_CODE (reg) == SIGN_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
- if (GET_CODE (reg) != REG || REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
continue;
/* Search the web corresponding to this operand. We depend on
record_conflict (web, hardreg2web[c]);
#endif
}
- if (rtl_dump_file)
+ if (dump_file)
{
int c;
ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
and conflicts. */
void
-build_i_graph (df)
- struct df *df;
+build_i_graph (struct df *df)
{
rtx insn;
defs and uses. */
void
-ra_build_realloc (df)
- struct df *df;
+ra_build_realloc (struct df *df)
{
struct web_part *last_web_parts = web_parts;
struct web **last_def2web = def2web;
unsigned int i;
struct dlist *d;
move_handled = sbitmap_alloc (get_max_uid ());
- web_parts = (struct web_part *) xcalloc (df->def_id + df->use_id,
- sizeof web_parts[0]);
- def2web = (struct web **) xcalloc (df->def_id + df->use_id,
- sizeof def2web[0]);
+ web_parts = xcalloc (df->def_id + df->use_id, sizeof web_parts[0]);
+ def2web = xcalloc (df->def_id + df->use_id, sizeof def2web[0]);
use2web = &def2web[df->def_id];
live_over_abnormal = sbitmap_alloc (df->use_id);
sbitmap_zero (live_over_abnormal);
if (!last_max_uid)
{
/* Setup copy cache, for copy_insn_p (). */
- copy_cache = (struct copy_p_cache *)
- xcalloc (get_max_uid (), sizeof (copy_cache[0]));
+ copy_cache = xcalloc (get_max_uid (), sizeof (copy_cache[0]));
init_bb_info ();
}
else
{
- copy_cache = (struct copy_p_cache *)
- xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
+ copy_cache = xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
memset (&copy_cache[last_max_uid], 0,
(get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
}
/* Free up/clear some memory, only needed for one pass. */
void
-ra_build_free ()
+ra_build_free (void)
{
struct dlist *d;
unsigned int i;
/* Free all memory for the interference graph structures. */
void
-ra_build_free_all (df)
- struct df *df;
+ra_build_free_all (struct df *df)
{
unsigned int i;