X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fdf.c;h=b23c2862b1707a891ee2b61f03be62091b7e166f;hb=68c22c86ba542b18e282465fcad642b41f0d00e0;hp=3a271c452533bf36943a61d860690fd23bb26e81;hpb=29f06995cc415c7de55cb47beae29d64447a766b;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/df.c b/gcc/df.c index 3a271c45253..b23c2862b17 100644 --- a/gcc/df.c +++ b/gcc/df.c @@ -1,5 +1,5 @@ /* Dataflow support routines. - Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. + Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com) @@ -54,7 +54,8 @@ Here's an example of using the dataflow routines. df_init simply creates a poor man's object (df) that needs to be passed to all the dataflow routines. df_finish destroys this -object and frees up any allocated memory. +object and frees up any allocated memory. DF_ALL says to analyse +everything. df_analyse performs the following: @@ -112,6 +113,7 @@ rather than searching the def or use bitmaps. If the insns are in SSA form then the reg-def and use-def lists should only contain the single defining ref. + TODO: 1) Incremental dataflow analysis. @@ -129,9 +131,7 @@ insns so when df_analyse is called we can easily determine all the new or deleted refs. Currently the global dataflow information is recomputed from scratch but this could be propagated more efficiently. -2) Improved global data flow computation using depth first search. - -3) Reduced memory requirements. +2) Reduced memory requirements. We could operate a pool of ref structures. When a ref is deleted it gets returned to the pool (say by linking on to a chain of free refs). @@ -140,18 +140,35 @@ tell which ones have been changed. Alternatively, we could periodically squeeze the def and use tables and associated bitmaps and renumber the def and use ids. -4) Ordering of reg-def and reg-use lists. +3) Ordering of reg-def and reg-use lists. Should the first entry in the def list be the first def (within a BB)? Similarly, should the first entry in the use list be the last use (within a BB)? -5) Working with a sub-CFG. +4) Working with a sub-CFG. Often the whole CFG does not need to be analyzed, for example, when optimising a loop, only certain registers are of interest. Perhaps there should be a bitmap argument to df_analyse to specify - which registers should be analyzed? */ +which registers should be analyzed? + + +NOTES: + +Embedded addressing side-effects, such as POST_INC or PRE_INC, generate +both a use and a def. These are both marked read/write to show that they +are dependent. For example, (set (reg 40) (mem (post_inc (reg 42)))) +will generate a use of reg 42 followed by a def of reg 42 (both marked +read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41)))) +generates a use of reg 41 then a def of reg 41 (both marked read/write), +even though reg 41 is decremented before it is used for the memory +address in this second example. + +A set to a REG inside a ZERO_EXTRACT, SIGN_EXTRACT, or SUBREG invokes +a read-modify write operation. We generate both a use and a def +and again mark them read/write. 
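+
+For example, (set (zero_extract (reg 40) (const_int 8) (const_int 0))
+(reg 41)) changes only the extracted bits of reg 40 and leaves the
+rest of the register intact, so we generate both a use and a def of
+reg 40 (marked read/write) in addition to the ordinary use of reg 41.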
+*/ #include "config.h" #include "system.h" @@ -163,7 +180,7 @@ Perhaps there should be a bitmap argument to df_analyse to specify #include "recog.h" #include "function.h" #include "regs.h" -#include "obstack.h" +#include "alloc-pool.h" #include "hard-reg-set.h" #include "basic-block.h" #include "sbitmap.h" @@ -180,13 +197,11 @@ Perhaps there should be a bitmap argument to df_analyse to specify } \ while (0) -static struct obstack df_ref_obstack; +static alloc_pool df_ref_pool; +static alloc_pool df_link_pool; static struct df *ddf; static void df_reg_table_realloc PARAMS((struct df *, int)); -#if 0 -static void df_def_table_realloc PARAMS((struct df *, int)); -#endif static void df_insn_table_realloc PARAMS((struct df *, unsigned int)); static void df_bitmaps_alloc PARAMS((struct df *, int)); static void df_bitmaps_free PARAMS((struct df *, int)); @@ -293,7 +308,6 @@ static void hybrid_search_sbitmap PARAMS ((basic_block, sbitmap *, sbitmap *, enum df_confluence_op, transfer_function_sbitmap, sbitmap, sbitmap, void *)); -static inline bool read_modify_subreg_p PARAMS ((rtx)); /* Local memory allocation/deallocation routines. */ @@ -310,7 +324,7 @@ df_insn_table_realloc (df, size) if (size <= df->insn_size) return; - /* Make the table a little larger than requested, so we don't need + /* Make the table a little larger than requested, so we do not need to enlarge it so often. */ size += df->insn_size / 4; @@ -355,38 +369,6 @@ df_reg_table_realloc (df, size) } -#if 0 -/* Not currently used. */ -static void -df_def_table_realloc (df, size) - struct df *df; - int size; -{ - int i; - struct ref *refs; - - /* Make table 25 percent larger by default. */ - if (! size) - size = df->def_size / 4; - - df->def_size += size; - df->defs = xrealloc (df->defs, - df->def_size * sizeof (*df->defs)); - - /* Allocate a new block of memory and link into list of blocks - that will need to be freed later. */ - - refs = xmalloc (size * sizeof (*refs)); - - /* Link all the new refs together, overloading the chain field. */ - for (i = 0; i < size - 1; i++) - refs[i].chain = (struct df_link *) (refs + i + 1); - refs[size - 1].chain = 0; -} -#endif - - - /* Allocate bitmaps for each basic block. */ static void df_bitmaps_alloc (df, flags) @@ -521,7 +503,9 @@ df_alloc (df, n_regs) int n_insns; basic_block bb; - gcc_obstack_init (&df_ref_obstack); + df_link_pool = create_alloc_pool ("df_link pool", sizeof (struct df_link), + 100); + df_ref_pool = create_alloc_pool ("df_ref pool", sizeof (struct ref), 100); /* Perhaps we should use LUIDs to save memory for the insn_refs table. This is only a small saving; a few pointers. */ @@ -606,7 +590,9 @@ df_free (df) BITMAP_XFREE (df->all_blocks); df->all_blocks = 0; - obstack_free (&df_ref_obstack, NULL); + free_alloc_pool (df_ref_pool); + free_alloc_pool (df_link_pool); + } /* Local miscellaneous routines. 
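 
    As an aside (an illustrative sketch, not part of this change; the
    identifiers "pool", "p" and "struct foo" are placeholders), the
    alloc-pool interface used by df_alloc and df_free above follows
    the pattern
 
 	pool = create_alloc_pool ("name", sizeof (struct foo), 100);
 	p = (struct foo *) pool_alloc (pool);
 	...
 	free_alloc_pool (pool);
 
    where the last argument of create_alloc_pool is the number of
    objects carved out of each underlying block.  Unlike the obstack
    it replaces, a pool returns fixed-size objects that could also be
    released individually with pool_free, although df.c frees both
    pools wholesale in df_free.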
*/ @@ -648,8 +634,7 @@ df_link_create (ref, next) { struct df_link *link; - link = (struct df_link *) obstack_alloc (&df_ref_obstack, - sizeof (*link)); + link = pool_alloc (df_link_pool); link->next = next; link->ref = ref; return link; @@ -787,8 +772,7 @@ df_ref_create (df, reg, loc, insn, ref_type, ref_flags) { struct ref *this_ref; - this_ref = (struct ref *) obstack_alloc (&df_ref_obstack, - sizeof (*this_ref)); + this_ref = pool_alloc (df_ref_pool); DF_REF_REG (this_ref) = reg; DF_REF_LOC (this_ref) = loc; DF_REF_INSN (this_ref) = insn; @@ -867,6 +851,7 @@ df_ref_record (df, reg, loc, insn, ref_type, ref_flags) { loc = &SUBREG_REG (reg); reg = *loc; + ref_flags |= DF_REF_STRIPPED; } regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); @@ -878,9 +863,9 @@ df_ref_record (df, reg, loc, insn, ref_type, ref_flags) if (! (df->flags & DF_HARD_REGS)) return; - /* GET_MODE (reg) is correct here. We don't want to go into a SUBREG + /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG for the mode, because we only want to add references to regs, which - are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_ + are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_ reference the whole reg 0 in DI mode (which would also include reg 1, at least, if 0 and 1 are SImode registers). */ endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg)); @@ -899,10 +884,10 @@ df_ref_record (df, reg, loc, insn, ref_type, ref_flags) } } -/* Writes to paradoxical subregs, or subregs which are too narrow - are read-modify-write. */ -static inline bool +/* Return non-zero if writes to paradoxical SUBREGs, or SUBREGs which + are too narrow, are read-modify-write. */ +bool read_modify_subreg_p (x) rtx x; { @@ -911,15 +896,11 @@ read_modify_subreg_p (x) return false; isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))); osize = GET_MODE_SIZE (GET_MODE (x)); - if (isize <= osize) - return true; - if (isize <= UNITS_PER_WORD) - return false; - if (osize > UNITS_PER_WORD) - return false; - return true; + /* Paradoxical subreg writes don't leave a trace of the old content. */ + return (isize > osize && isize > UNITS_PER_WORD); } + /* Process all the registers defined in the rtx, X. */ static void df_def_record_1 (df, x, bb, insn) @@ -928,10 +909,18 @@ df_def_record_1 (df, x, bb, insn) basic_block bb; rtx insn; { - rtx *loc = &SET_DEST (x); - rtx dst = *loc; + rtx *loc; + rtx dst; enum df_ref_flags flags = 0; + /* We may recursivly call ourselves on EXPR_LIST when dealing with PARALLEL + construct. */ + if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER) + loc = &XEXP (x, 0); + else + loc = &SET_DEST (x); + dst = *loc; + /* Some targets place small structures in registers for return values of functions. */ if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode) @@ -939,7 +928,12 @@ df_def_record_1 (df, x, bb, insn) int i; for (i = XVECLEN (dst, 0) - 1; i >= 0; i--) - df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn); + { + rtx temp = XVECEXP (dst, 0, i); + if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER + || GET_CODE (temp) == SET) + df_def_record_1 (df, temp, bb, insn); + } return; } @@ -950,14 +944,15 @@ df_def_record_1 (df, x, bb, insn) flags |= DF_REF_MODE_CHANGE; #endif - /* May be, we should flag the use of strict_low_part somehow. Might be - handy for the reg allocator. */ + /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might + be handy for the reg allocator. 
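+
+     For example, (set (strict_low_part (subreg:QI (reg:SI 40) 0))
+     (reg:QI 41)) changes only the low byte of reg 40, so the loop
+     below peels off the STRICT_LOW_PART and the SUBREG and records
+     a def of reg 40 itself, marked read/write.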
*/ while (GET_CODE (dst) == STRICT_LOW_PART || GET_CODE (dst) == ZERO_EXTRACT || GET_CODE (dst) == SIGN_EXTRACT - || read_modify_subreg_p (dst)) + || ((df->flags & DF_FOR_REGALLOC) == 0 + && read_modify_subreg_p (dst))) { - /* Strict low part always contains SUBREG, but we don't want to make + /* Strict low part always contains SUBREG, but we do not want to make it appear outside, as whole register is always considered. */ if (GET_CODE (dst) == STRICT_LOW_PART) { @@ -1059,7 +1054,7 @@ df_uses_record (df, loc, ref_type, bb, insn, flags) case SUBREG: /* While we're here, optimize this case. */ - /* In case the SUBREG is not of a register, don't optimize. */ + /* In case the SUBREG is not of a REG, do not optimize. */ if (GET_CODE (SUBREG_REG (x)) != REG) { loc = &SUBREG_REG (x); @@ -1075,7 +1070,7 @@ df_uses_record (df, loc, ref_type, bb, insn, flags) /* ... Fall through ... */ case REG: - /* See a register (or subreg) other than being set. */ + /* See a REG (or SUBREG) other than being set. */ df_ref_record (df, x, loc, insn, ref_type, flags); return; @@ -1089,7 +1084,8 @@ df_uses_record (df, loc, ref_type, bb, insn, flags) { enum df_ref_flags use_flags; case SUBREG: - if (read_modify_subreg_p (dst)) + if ((df->flags & DF_FOR_REGALLOC) == 0 + && read_modify_subreg_p (dst)) { use_flags = DF_REF_READ_WRITE; #ifdef CLASS_CANNOT_CHANGE_MODE @@ -1113,7 +1109,7 @@ df_uses_record (df, loc, ref_type, bb, insn, flags) bb, insn, 0); break; case STRICT_LOW_PART: - /* A strict_low_part uses the whole reg not only the subreg. */ + /* A strict_low_part uses the whole REG and not just the SUBREG. */ dst = XEXP (dst, 0); if (GET_CODE (dst) != SUBREG) abort (); @@ -1121,7 +1117,7 @@ df_uses_record (df, loc, ref_type, bb, insn, flags) #ifdef CLASS_CANNOT_CHANGE_MODE if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst), GET_MODE (SUBREG_REG (dst)))) - use_flags |= DF_REF_MODE_CHANGE; + use_flags |= DF_REF_MODE_CHANGE; #endif df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb, insn, use_flags); @@ -1286,7 +1282,6 @@ df_insn_refs_record (df, bb, insn) df_uses_record (df, &PATTERN (insn), DF_REF_REG_USE, bb, insn, 0); - if (GET_CODE (insn) == CALL_INSN) { rtx note; @@ -1379,9 +1374,11 @@ df_bb_reg_def_chain_create (df, bb) { struct ref *def = link->ref; unsigned int dregno = DF_REF_REGNO (def); - /* Don't add ref's to the chain two times. I.e. only add - new refs. XXX the same could be done by testing if the current - insn is a modified (or a new) one. This would be faster. */ + + /* Do not add ref's to the chain twice, i.e., only add new + refs. XXX the same could be done by testing if the + current insn is a modified (or a new) one. This would be + faster. */ if (DF_REF_ID (def) < df->def_id_save) continue; @@ -1417,8 +1414,8 @@ df_bb_reg_use_chain_create (df, bb) { rtx insn; - /* Scan in forward order so that the last uses appear at the - start of the chain. */ + /* Scan in forward order so that the last uses appear at the start + of the chain. */ for (insn = bb->head; insn && insn != NEXT_INSN (bb->end); insn = NEXT_INSN (insn)) @@ -1433,9 +1430,11 @@ df_bb_reg_use_chain_create (df, bb) { struct ref *use = link->ref; unsigned int uregno = DF_REF_REGNO (use); - /* Don't add ref's to the chain two times. I.e. only add - new refs. XXX the same could be done by testing if the current - insn is a modified (or a new) one. This would be faster. */ + + /* Do not add ref's to the chain twice, i.e., only add new + refs. XXX the same could be done by testing if the + current insn is a modified (or a new) one. 
This would be + faster. */ if (DF_REF_ID (use) < df->use_id_save) continue; @@ -1606,7 +1605,7 @@ df_bb_ud_chain_create (df, bb) } - /* For each def in insn...record the last def of each reg. */ + /* For each def in insn... record the last def of each reg. */ for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; @@ -1644,6 +1643,8 @@ df_rd_transfer_function (bb, changed, in, out, gen, kill, data) { *changed = bitmap_union_of_diff (out, gen, in, kill); } + + static void df_ru_transfer_function (bb, changed, in, out, gen, kill, data) int bb ATTRIBUTE_UNUSED; @@ -1654,6 +1655,7 @@ df_ru_transfer_function (bb, changed, in, out, gen, kill, data) *changed = bitmap_union_of_diff (in, gen, out, kill); } + static void df_lr_transfer_function (bb, changed, in, out, use, def, data) int bb ATTRIBUTE_UNUSED; @@ -1968,6 +1970,7 @@ df_luids_set (df, blocks) return total; } + /* Perform dataflow analysis using existing DF structure for blocks within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */ static void @@ -2077,7 +2080,7 @@ df_analyse_1 (df, blocks, flags, update) kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill; } iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks, - FORWARD, UNION, df_rd_transfer_function, + DF_FORWARD, DF_UNION, df_rd_transfer_function, df->inverse_rc_map, NULL); free (in); free (out); @@ -2113,7 +2116,7 @@ df_analyse_1 (df, blocks, flags, update) kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill; } iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks, - BACKWARD, UNION, df_ru_transfer_function, + DF_BACKWARD, DF_UNION, df_ru_transfer_function, df->inverse_rts_map, NULL); free (in); free (out); @@ -2152,7 +2155,7 @@ df_analyse_1 (df, blocks, flags, update) def[bb->index] = DF_BB_INFO (df, bb)->lr_def; } iterative_dataflow_bitmap (in, out, use, def, df->all_blocks, - BACKWARD, UNION, df_lr_transfer_function, + DF_BACKWARD, DF_UNION, df_lr_transfer_function, df->inverse_rts_map, NULL); free (in); free (out); @@ -2165,6 +2168,7 @@ df_analyse_1 (df, blocks, flags, update) { df_reg_info_compute (df, df->all_blocks); } + free (df->dfs_order); free (df->rc_order); free (df->rts_order); @@ -2243,7 +2247,7 @@ df_bb_refs_update (df, bb) rtx insn; int count = 0; - /* While we have to scan the chain of insns for this BB, we don't + /* While we have to scan the chain of insns for this BB, we do not need to allocate and queue a long chain of BB/INSN pairs. Using a bitmap for insns_modified saves memory and avoids queuing duplicates. */ @@ -2502,7 +2506,8 @@ df_insn_modify (df, bb, insn) } -typedef struct replace_args { +typedef struct replace_args +{ rtx match; rtx replacement; rtx insn; @@ -3282,6 +3287,8 @@ df_chain_dump (link, file) fprintf (file, "}"); } + +/* Dump a chain of refs with the associated regno. */ static void df_chain_dump_regno (link, file) struct df_link *link; @@ -3298,6 +3305,7 @@ df_chain_dump_regno (link, file) fprintf (file, "}"); } + /* Dump dataflow info. */ void df_dump (df, flags, file) @@ -3501,6 +3509,7 @@ df_insn_debug (df, insn, file) fprintf (file, "\n"); } + void df_insn_debug_regno (df, insn, file) struct df *df; @@ -3529,6 +3538,7 @@ df_insn_debug_regno (df, insn, file) fprintf (file, "\n"); } + static void df_regno_debug (df, regno, file) struct df *df; @@ -3564,7 +3574,8 @@ df_ref_debug (df, ref, file) df_chain_dump (DF_REF_CHAIN (ref), file); fprintf (file, "\n"); } - + +/* Functions for debugging from GDB. 
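+
+   For example, from within GDB one can do
+
+	(gdb) call debug_df_insn (insn)
+	(gdb) call debug_df_chain (link)
+
+   to dump an insn's defs and uses, or a ref chain, to stderr.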
*/ void debug_df_insn (insn) @@ -3622,6 +3633,7 @@ debug_df_chain (link) df_chain_dump (link, stderr); fputc ('\n', stderr); } + /* Hybrid search algorithm from "Implementation Techniques for Efficient Data-Flow Analysis of Large Programs". */ @@ -3642,12 +3654,13 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, int i = block->index; edge e; basic_block bb = block; + SET_BIT (visited, block->index); if (TEST_BIT (pending, block->index)) { - if (dir == FORWARD) + if (dir == DF_FORWARD) { - /* Calculate of predecessor_outs */ + /* Calculate of predecessor_outs. */ bitmap_zero (in[i]); for (e = bb->pred; e != 0; e = e->pred_next) { @@ -3655,10 +3668,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, continue; switch (conf_op) { - case UNION: + case DF_UNION: bitmap_a_or_b (in[i], in[i], out[e->src->index]); break; - case INTERSECTION: + case DF_INTERSECTION: bitmap_a_and_b (in[i], in[i], out[e->src->index]); break; } @@ -3666,7 +3679,7 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, } else { - /* Calculate of successor ins */ + /* Calculate of successor ins. */ bitmap_zero (out[i]); for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3674,10 +3687,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, continue; switch (conf_op) { - case UNION: + case DF_UNION: bitmap_a_or_b (out[i], out[i], in[e->dest->index]); break; - case INTERSECTION: + case DF_INTERSECTION: bitmap_a_and_b (out[i], out[i], in[e->dest->index]); break; } @@ -3688,7 +3701,7 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, RESET_BIT (pending, i); if (changed) { - if (dir == FORWARD) + if (dir == DF_FORWARD) { for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3708,7 +3721,7 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir, } } } - if (dir == FORWARD) + if (dir == DF_FORWARD) { for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3753,12 +3766,13 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, int i = block->index; edge e; basic_block bb = block; + SET_BIT (visited, block->index); if (TEST_BIT (pending, block->index)) { - if (dir == FORWARD) + if (dir == DF_FORWARD) { - /* Calculate of predecessor_outs */ + /* Calculate of predecessor_outs. */ sbitmap_zero (in[i]); for (e = bb->pred; e != 0; e = e->pred_next) { @@ -3766,10 +3780,10 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, continue; switch (conf_op) { - case UNION: + case DF_UNION: sbitmap_a_or_b (in[i], in[i], out[e->src->index]); break; - case INTERSECTION: + case DF_INTERSECTION: sbitmap_a_and_b (in[i], in[i], out[e->src->index]); break; } @@ -3777,7 +3791,7 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, } else { - /* Calculate of successor ins */ + /* Calculate of successor ins. */ sbitmap_zero (out[i]); for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3785,21 +3799,21 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, continue; switch (conf_op) { - case UNION: + case DF_UNION: sbitmap_a_or_b (out[i], out[i], in[e->dest->index]); break; - case INTERSECTION: + case DF_INTERSECTION: sbitmap_a_and_b (out[i], out[i], in[e->dest->index]); break; } } } - /* Common part */ + /* Common part. 
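+
+	     The transfer function below recomputes this block's
+	     solution from the freshly merged input and reports back
+	     through CHANGED whether anything moved; for example, the
+	     reaching-definitions version computes
+	     out[i] = gen[i] | (in[i] & ~kill[i]).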
*/ (*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data); RESET_BIT (pending, i); if (changed) { - if (dir == FORWARD) + if (dir == DF_FORWARD) { for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3819,7 +3833,7 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, } } } - if (dir == FORWARD) + if (dir == DF_FORWARD) { for (e = bb->succ; e != 0; e = e->succ_next) { @@ -3846,8 +3860,6 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir, } - - /* gen = GEN set. kill = KILL set. in, out = Filled in by function. @@ -3883,20 +3895,23 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks, fibheap_t worklist; basic_block bb; sbitmap visited, pending; + pending = sbitmap_alloc (last_basic_block); visited = sbitmap_alloc (last_basic_block); sbitmap_zero (pending); sbitmap_zero (visited); worklist = fibheap_new (); + EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, { fibheap_insert (worklist, order[i], (void *) (size_t) i); SET_BIT (pending, i); - if (dir == FORWARD) + if (dir == DF_FORWARD) sbitmap_copy (out[i], gen[i]); else sbitmap_copy (in[i], gen[i]); }); + while (sbitmap_first_set_bit (pending) != -1) { while (!fibheap_empty (worklist)) @@ -3907,6 +3922,7 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks, hybrid_search_sbitmap (bb, in, out, gen, kill, dir, conf_op, transfun, visited, pending, data); } + if (sbitmap_first_set_bit (pending) != -1) { EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, @@ -3920,13 +3936,15 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks, break; } } + sbitmap_free (pending); sbitmap_free (visited); fibheap_delete (worklist); } + /* Exactly the same as iterative_dataflow_sbitmap, except it works on - bitmaps instead */ + bitmaps instead. */ void iterative_dataflow_bitmap (in, out, gen, kill, blocks, dir, conf_op, transfun, order, data) @@ -3942,20 +3960,23 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks, fibheap_t worklist; basic_block bb; sbitmap visited, pending; + pending = sbitmap_alloc (last_basic_block); visited = sbitmap_alloc (last_basic_block); sbitmap_zero (pending); sbitmap_zero (visited); worklist = fibheap_new (); + EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, { fibheap_insert (worklist, order[i], (void *) (size_t) i); SET_BIT (pending, i); - if (dir == FORWARD) + if (dir == DF_FORWARD) bitmap_copy (out[i], gen[i]); else bitmap_copy (in[i], gen[i]); }); + while (sbitmap_first_set_bit (pending) != -1) { while (!fibheap_empty (worklist)) @@ -3966,6 +3987,7 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks, hybrid_search_bitmap (bb, in, out, gen, kill, dir, conf_op, transfun, visited, pending, data); } + if (sbitmap_first_set_bit (pending) != -1) { EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,