for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
if (cl1->size_word == cl2->size_word)
{
- bitmap_operation (cl1->conflicts, cl1->conflicts,
- cl2->conflicts, BITMAP_IOR);
+ bitmap_ior_into (cl1->conflicts, cl2->conflicts);
BITMAP_XFREE (cl2->conflicts);
cl2->conflicts = NULL;
}
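Here bitmap_ior_into (dst, src) is the in-place union that replaces the old three-operand bitmap_operation (dst, dst, src, BITMAP_IOR) call, after which the now-redundant source bitmap can be freed. A minimal stand-alone sketch of that semantics, with a single machine word standing in for the real GCC bitmap type and all names hypothetical:

  #include <stdio.h>

  /* Stand-in for a conflict bitmap: one machine word instead of GCC's
     chunked bitmap type.  */
  typedef unsigned long fake_bitmap;

  /* Same effect as bitmap_ior_into (dst, src): dst |= src, in place.  */
  static void
  fake_ior_into (fake_bitmap *dst, const fake_bitmap *src)
  {
    *dst |= *src;
  }

  int
  main (void)
  {
    fake_bitmap cl1_conflicts = 0x05, cl2_conflicts = 0x03;
    fake_ior_into (&cl1_conflicts, &cl2_conflicts); /* merge cl2 into cl1 ...  */
    cl2_conflicts = 0;                 /* ... then drop cl2, as BITMAP_XFREE does.  */
    printf ("%#lx\n", cl1_conflicts);  /* prints 0x7 */
    return 0;
  }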
are allowed. */
while (1)
{
+ unsigned int i;
int uid = INSN_UID (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
number_seen[uid]++;
edge e;
unsigned HOST_WIDE_INT undef = use->undefined;
struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
- if ((e = bb->pred) == NULL)
+ if (EDGE_COUNT (bb->preds) == 0)
return;
/* We now check whether we have already traversed the predecessors of this
block for the current pass and the current set of undefined
info->pass = loc_vpass;
info->undefined = undef;
/* All but the last predecessor are handled recursively. */
- for (; e->pred_next; e = e->pred_next)
+ for (e = NULL, i = 0; i < EDGE_COUNT (bb->preds) - 1; i++)
{
+ e = EDGE_PRED (bb, i);
insn = live_in_edge (df, use, e);
if (insn)
live_in (df, use, insn);
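The conversion swaps the e->pred_next pointer chain for the edge-vector accessors EDGE_COUNT and EDGE_PRED while keeping the shape of the walk: every predecessor except the last is handled by a recursive call, and the last one falls through to the enclosing while (1) loop. A stand-alone sketch of that iteration shape, with a plain array playing the predecessor vector and all names hypothetical:

  #include <stdio.h>

  /* handle_recursively and fall_through stand in for the recursive live_in
     call and for continuing the outer while (1) loop on the last edge.  */
  static void
  handle_recursively (int e)
  {
    printf ("recurse on edge %d\n", e);
  }

  static void
  fall_through (int e)
  {
    printf ("continue with edge %d\n", e);
  }

  int
  main (void)
  {
    int preds[] = { 10, 11, 12 };               /* the predecessor "vector"   */
    unsigned n = sizeof preds / sizeof *preds;  /* EDGE_COUNT (bb->preds)     */
    unsigned i;

    if (n == 0)
      return 0;                                 /* no predecessors: done      */
    for (i = 0; i < n - 1; i++)                 /* all but the last ...       */
      handle_recursively (preds[i]);            /* ... are handled here       */
    fall_through (preds[n - 1]);                /* the last one falls through */
    return 0;
  }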
struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
rtx insn;
bitmap all_defs;
- int first, use_id;
+ int first;
+ unsigned use_id;
unsigned int deaths = 0;
unsigned int contains_call = 0;
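use_id is split into its own declaration and made unsigned because the bitmap_iterator / EXECUTE_IF_SET_IN_BITMAP interface hands back unsigned bit indices. As a rough stand-alone illustration of walking set bits with an unsigned index (a single word stands in for the real bitmap, names hypothetical):

  #include <stdio.h>

  int
  main (void)
  {
    unsigned long all_defs = 0x29;  /* bits 0, 3 and 5 set */
    unsigned use_id;                /* bit indices are naturally unsigned */

    /* Rough stand-in for EXECUTE_IF_SET_IN_BITMAP (all_defs, 0, use_id, bi).  */
    for (use_id = 0; use_id < sizeof all_defs * 8; use_id++)
      if (all_defs & (1UL << use_id))
        printf ("bit %u is set\n", use_id);
    return 0;
  }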
uses conflict with all defs, and update their other members. */
if (deaths > 0
|| contains_call
- || bitmap_first_set_bit (all_defs) >= 0)
+ || !bitmap_empty_p (all_defs))
{
bitmap_iterator bi;
wp->spanned_deaths += deaths;
wp->crosses_call |= contains_call;
conflicts = get_sub_conflicts (wp, bl);
- bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
+ bitmap_ior_into (conflicts, all_defs);
}
}
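The emptiness test is now spelled directly: !bitmap_empty_p (all_defs) asks whether any def has been recorded at all, instead of probing bitmap_first_set_bit for a non-negative result. The same question on a single-word stand-in, names hypothetical:

  #include <stdio.h>

  /* Same question as !bitmap_empty_p (all_defs): is any bit set at all?  */
  static int
  any_def_recorded (unsigned long all_defs)
  {
    return all_defs != 0;
  }

  int
  main (void)
  {
    unsigned long all_defs = 0;
    printf ("%s\n", any_def_recorded (all_defs) ? "record conflicts" : "nothing to do");
    all_defs |= 1UL << 4;
    printf ("%s\n", any_def_recorded (all_defs) ? "record conflicts" : "nothing to do");
    return 0;
  }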
/* Useless conflicts will be rebuilt completely. But check
for cleanliness, as the web might have come from the
free list. */
- gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0);
+ gcc_assert (bitmap_empty_p (web->useless_conflicts));
}
else
{
/* Useless conflicts with new webs will be rebuilt if they
are still there. */
- bitmap_operation (web->useless_conflicts, web->useless_conflicts,
- newwebs, BITMAP_AND_COMPL);
+ bitmap_and_compl_into (web->useless_conflicts, newwebs);
/* Go through all conflicts, and retain those to old webs. */
for (cl = web->conflict_list; cl; cl = cl->next)
{
for (i = 0; i < df->def_id; i++)
if (web_parts[i].ref == NULL)
bitmap_set_bit (ignore_defs, i);
- have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);
+ have_ignored = !bitmap_empty_p (ignore_defs);
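bitmap_and_compl_into (dst, src) computes dst &= ~src in place, which is how conflicts against ignored or no-longer-existing defs are masked out here. A stand-alone sketch of that operation on single-word stand-ins, names hypothetical:

  #include <stdio.h>

  /* Same effect as bitmap_and_compl_into (dst, src): dst &= ~src, in place.  */
  static void
  fake_and_compl_into (unsigned long *dst, unsigned long src)
  {
    *dst &= ~src;
  }

  int
  main (void)
  {
    unsigned long conflicts = 0x3f;    /* defs 0..5 recorded as conflicts   */
    unsigned long ignore_defs = 0x0a;  /* defs 1 and 3 have no ref anymore  */
    fake_and_compl_into (&conflicts, ignore_defs);
    printf ("%#lx\n", conflicts);      /* prints 0x35: defs 1 and 3 dropped */
    return 0;
  }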
/* Now record all conflicts between webs. Note that we only check
the conflict bitmaps of all defs. Conflict bitmaps are only in
for (; cl; cl = cl->next)
if (cl->conflicts)
{
- int j;
+ unsigned j;
struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
bitmap_iterator bi;
if (have_ignored)
- bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
- BITMAP_AND_COMPL);
+ bitmap_and_compl_into (cl->conflicts, ignore_defs);
/* We reduce the number of calls to record_conflict() with this
pass mechanism. record_conflict() itself also has some early-out
optimizations, but here we can use the special properties of