1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008 Free Software Foundation, Inc.
4 Originally contributed by Michael P. Hayes
5 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
6 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
7 and Kenneth Zadeck (zadeck@naturalbridge.com).
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
28 The files in this collection (df*.c,df.h) provide a general framework
29 for solving dataflow problems. The global dataflow is performed using
30 a good implementation of iterative dataflow analysis.
32 The file df-problems.c provides problem instance for the most common
33 dataflow problems: reaching defs, upward exposed uses, live variables,
34 uninitialized variables, def-use chains, and use-def chains. However,
35 the interface allows other dataflow problems to be defined as well.
37 Dataflow analysis is available in most of the rtl backend (the parts
38 between pass_df_initialize and pass_df_finish). It is quite likely
39 that these boundaries will be expanded in the future. The only
40 requirement is that there be a correct control flow graph.
42 There are three variations of the live variable problem that are
43 available whenever dataflow is available. The LR problem finds the
44 areas that can reach a use of a variable, the UR problem finds the
45 areas that can be reached from a definition of a variable. The LIVE
46 problem finds the intersection of these two areas.
48 There are several optional problems. These can be enabled when they
49 are needed and disabled when they are not needed.
51 Dataflow problems are generally solved in three layers. The bottom
52 layer is called scanning where a data structure is built for each rtl
53 insn that describes the set of defs and uses of that insn. Scanning
54 is generally kept up to date, i.e. as the insns change, the scanned
55 version of that insn changes also. There are various mechanisms for
56 making this happen and are described in the INCREMENTAL SCANNING
59 In the middle layer, basic blocks are scanned to produce transfer
60 functions which describe the effects of that block on the global
61 dataflow solution. The transfer functions are only rebuilt if the
62 some instruction within the block has changed.
64 The top layer is the dataflow solution itself. The dataflow solution
65 is computed by using an efficient iterative solver and the transfer
66 functions. The dataflow solution must be recomputed whenever the
67 control changes or if one of the transfer function changes.
72 Here is an example of using the dataflow routines.
74 df_[chain,live,note,rd]_add_problem (flags);
76 df_set_blocks (blocks);
82 df_finish_pass (false);
84 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
85 instance to struct df_problem, to the set of problems solved in this
86 instance of df. All calls to add a problem for a given instance of df
87 must occur before the first call to DF_ANALYZE.
89 Problems can be dependent on other problems. For instance, solving
90 def-use or use-def chains is dependent on solving reaching
91 definitions. As long as these dependencies are listed in the problem
92 definition, the order of adding the problems is not material.
93 Otherwise, the problems will be solved in the order of calls to
94 df_add_problem. Note that it is not necessary to have a problem. In
95 that case, df will just be used to do the scanning.
99 DF_SET_BLOCKS is an optional call used to define a region of the
100 function on which the analysis will be performed. The normal case is
101 to analyze the entire function and no call to df_set_blocks is made.
102 DF_SET_BLOCKS only affects the blocks that are affected when computing
103 the transfer functions and final solution. The insn level information
104 is always kept up to date.
106 When a subset is given, the analysis behaves as if the function only
107 contains those blocks and any edges that occur directly between the
108 blocks in the set. Care should be taken to call df_set_blocks right
109 before the call to analyze in order to eliminate the possibility that
110 optimizations that reorder blocks invalidate the bitvector.
112 DF_ANALYZE causes all of the defined problems to be (re)solved. When
113 DF_ANALYZE completes, the IN and OUT sets for each basic block
114 contain the computed information. The DF_*_BB_INFO macros can be used
115 to access these bitvectors. All deferred rescannings are done before
116 the transfer functions are recomputed.
118 DF_DUMP can then be called to dump the information produced to some
119 file. This calls DF_DUMP_START, to print the information that is not
120 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
121 for each block to print the basic specific information. These parts
122 can all be called separately as part of a larger dump function.
125 DF_FINISH_PASS causes df_remove_problem to be called on all of the
126 optional problems. It also causes any insns whose scanning has been
127 deferred to be rescanned as well as clears all of the changeable flags.
128 Setting the pass manager TODO_df_finish flag causes this function to
129 be run. However, the pass manager will call df_finish_pass AFTER the
130 pass dumping has been done, so if you want to see the results of the
131 optional problems in the pass dumps, use the TODO flag rather than
132 calling the function yourself.
136 There are four ways of doing the incremental scanning:
138 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
139 df_bb_delete, df_insn_change_bb have been added to most of
140 the low level service functions that maintain the cfg and change
141 rtl. Calling any of these routines may cause some number of insns
144 For most modern rtl passes, this is certainly the easiest way to
145 manage rescanning the insns. This technique also has the advantage
146 that the scanning information is always correct and can be relied
147 upon even after changes have been made to the instructions. This
148 technique is contra indicated in several cases:
150 a) If def-use chains OR use-def chains (but not both) are built,
151 using this is SIMPLY WRONG. The problem is that when a ref is
152 deleted that is the target of an edge, there is not enough
153 information to efficiently find the source of the edge and
154 delete the edge. This leaves a dangling reference that may
157 b) If def-use chains AND use-def chains are built, this may
158 produce unexpected results. The problem is that the incremental
159 scanning of an insn does not know how to repair the chains that
160 point into an insn when the insn changes. So the incremental
161 scanning just deletes the chains that enter and exit the insn
162 being changed. The dangling reference issue in (a) is not a
163 problem here, but if the pass is depending on the chains being
164 maintained after insns have been modified, this technique will
165 not do the correct thing.
167 c) If the pass modifies insns several times, this incremental
168 updating may be expensive.
170 d) If the pass modifies all of the insns, as does register
171 allocation, it is simply better to rescan the entire function.
173 e) If the pass uses either non-standard or ancient techniques to
174 modify insns, automatic detection of the insns that need to be
175 rescanned may be impractical. Cse and regrename fall into this
178 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
179 df_insn_delete do not immediately change the insn but instead make
180 a note that the insn needs to be rescanned. The next call to
181 df_analyze, df_finish_pass, or df_process_deferred_rescans will
182 cause all of the pending rescans to be processed.
184 This is the technique of choice if either 1a, 1b, or 1c are issues
185 in the pass. In the case of 1a or 1b, a call to df_remove_problem
186 (df_chain) should be made before the next call to df_analyze or
187 df_process_deferred_rescans.
189 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
190 (This mode can be cleared by calling df_clear_flags
191 (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns to
194 3) Total rescanning - In this mode the rescanning is disabled.
195 However, the df information associated with deleted insns is deleted
196 at the time the insn is deleted. At the end of the pass, a call
197 must be made to df_insn_rescan_all. This method is used by the
198 register allocator since it generally changes each insn multiple
199 times (once for each ref) and does not need to make use of the
200 updated scanning information.
202 It is also currently used by two older passes (cse, and regrename)
203 which change insns in hard to track ways. It is hoped that this
204 will be fixed soon since it is expensive to rescan all of the
205 insns when only a small number of them have really changed.
207 4) Do it yourself - In this mechanism, the pass updates the insns
208 itself using the low level df primitives. Currently no pass does
209 this, but it has the advantage that it is quite efficient given
210 that the pass generally has exact knowledge of what it is changing.
214 Scanning produces a `struct df_ref' data structure (ref) is allocated
215 for every register reference (def or use) and this records the insn
216 and bb the ref is found within. The refs are linked together in
217 chains of uses and defs for each insn and for each register. Each ref
218 also has a chain field that links all the use refs for a def or all
219 the def refs for a use. This is used to create use-def or def-use
222 Different optimizations have different needs. Ultimately, only
223 register allocation and schedulers should be using the bitmaps
224 produced for the live register and uninitialized register problems.
225 The rest of the backend should be upgraded to using and maintaining
226 the linked information such as def use or use def chains.
231 While incremental bitmaps are not worthwhile to maintain, incremental
232 chains may be perfectly reasonable. The fastest way to build chains
233 from scratch or after significant modifications is to build reaching
234 definitions (RD) and build the chains from this.
236 However, general algorithms for maintaining use-def or def-use chains
237 are not practical. The amount of work to recompute any
238 chain after an arbitrary change is large. However, with a modest
239 amount of work it is generally possible to have the application that
240 uses the chains keep them up to date. The high level knowledge of
241 what is really happening is essential to crafting efficient
242 incremental algorithms.
244 As for the bit vector problems, there is no interface to give a set of
245 blocks over which to resolve the iteration. In general, restarting a
246 dataflow iteration is difficult and expensive. Again, the best way to
247 keep the dataflow information up to date (if this is really what is
248 needed) it to formulate a problem specific solution.
250 There are fine grained calls for creating and deleting references from
251 instructions in df-scan.c. However, these are not currently connected
252 to the engine that resolves the dataflow equations.
257 The basic object is a DF_REF (reference) and this may either be a
258 DEF (definition) or a USE of a register.
260 These are linked into a variety of lists; namely reg-def, reg-use,
261 insn-def, insn-use, def-use, and use-def lists. For example, the
262 reg-def lists contain all the locations that define a given register
263 while the insn-use lists contain all the locations that use a
266 Note that the reg-def and reg-use chains are generally short for
267 pseudos and long for the hard registers.
271 1) The df insn information is kept in the insns array. This array is
274 2) Each insn has three sets of refs: They are linked into one of three
275 lists: the insn's defs list (accessed by the DF_INSN_DEFS or
276 DF_INSN_UID_DEFS macros), the insn's uses list (accessed by the
277 DF_INSN_USES or DF_INSN_UID_USES macros) or the insn's eq_uses list
278 (accessed by the DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
279 The latter list are the list of references in REG_EQUAL or
280 REG_EQUIV notes. These macros produce a ref (or NULL), the rest of
281 the list can be obtained by traversal of the NEXT_REF field
282 (accessed by the DF_REF_NEXT_REF macro.) There is no significance
283 to the ordering of the uses or refs in an instruction.
285 3) Each insn has a logical uid field (LUID). When properly set, this
286 is an integer that numbers each insn in the basic block, in order from
287 the start of the block. The numbers are only correct after a call to
288 df_analyze. They will rot after insns are added, deleted or moved
293 There are 4 ways to obtain access to refs:
295 1) References are divided into two categories, REAL and ARTIFICIAL.
297 REAL refs are associated with instructions.
299 ARTIFICIAL refs are associated with basic blocks. The heads of
300 these lists can be accessed by calling df_get_artificial_defs or
301 df_get_artificial_uses for the particular basic block.
303 Artificial defs and uses occur both at the beginning and ends of blocks.
305 For blocks that are at the destination of eh edges, the
306 artificial uses and defs occur at the beginning. The defs relate
307 to the registers specified in EH_RETURN_DATA_REGNO and the uses
308 relate to the registers specified in EH_USES. Logically these
309 defs and uses should really occur along the eh edge, but there is
310 no convenient way to do this. Artificial edges that occur at the
311 beginning of the block have the DF_REF_AT_TOP flag set.
313 Artificial uses occur at the end of all blocks. These arise from
314 the hard registers that are always live, such as the stack
315 register and are put there to keep the code from forgetting about
318 Artificial defs occur at the end of the entry block. These arise
319 from registers that are live at entry to the function.
321 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
322 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
324 All of the eq_uses, uses and defs associated with each pseudo or
325 hard register may be linked in a bidirectional chain. These are
326 called reg-use or reg_def chains. If the changeable flag
327 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
328 treated like uses. If it is not set they are ignored.
330 The first use, eq_use or def for a register can be obtained using
331 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
332 macros. Subsequent uses for the same regno can be obtained by
333 following the next_reg field of the ref. The number of elements in
334 each of the chains can be found by using the DF_REG_USE_COUNT,
335 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
337 In previous versions of this code, these chains were ordered. It
338 has not been practical to continue this practice.
340 3) If def-use or use-def chains are built, these can be traversed to
341 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
342 include the eq_uses. Otherwise these are ignored when building the
345 4) An array of all of the uses (and an array of all of the defs) can
346 be built. These arrays are indexed by the value in the id
347 structure. These arrays are only lazily kept up to date, and that
348 process can be expensive. To have these arrays built, call
349 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
350 has been set the array will contain the eq_uses. Otherwise these
351 are ignored when building the array and assigning the ids. Note
352 that the values in the id field of a ref may change across calls to
353 df_analyze or df_reorganize_defs or df_reorganize_uses.
355 If the only use of this array is to find all of the refs, it is
356 better to traverse all of the registers and then traverse all of
357 reg-use or reg-def chains.
361 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
362 both a use and a def. These are both marked read/write to show that they
363 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
364 will generate a use of reg 42 followed by a def of reg 42 (both marked
365 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
366 generates a use of reg 41 then a def of reg 41 (both marked read/write),
367 even though reg 41 is decremented before it is used for the memory
368 address in this second example.
370 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
371 for which the number of word_mode units covered by the outer mode is
372 smaller than that covered by the inner mode, invokes a read-modify-write
373 operation. We generate both a use and a def and again mark them
376 Paradoxical subreg writes do not leave a trace of the old content, so they
377 are write-only operations.
383 #include "coretypes.h"
387 #include "insn-config.h"
389 #include "function.h"
392 #include "alloc-pool.h"
394 #include "hard-reg-set.h"
395 #include "basic-block.h"
400 #include "tree-pass.h"
403 static void *df_get_bb_info (struct dataflow *, unsigned int);
404 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
406 static void df_set_clean_cfg (void);
409 /* An obstack for bitmaps not related to specific dataflow problems.
410 This obstack should e.g. be used for bitmaps with a short life time
411 such as temporary bitmaps. */
413 bitmap_obstack df_bitmap_obstack;
416 /*----------------------------------------------------------------------------
417 Functions to create, destroy and manipulate an instance of df.
418 ----------------------------------------------------------------------------*/
422 /* Add PROBLEM (and any dependent problems) to the DF instance. */
/* NOTE(review): this listing is truncated -- the return type, braces,
   the early return when the problem is already registered, and the
   declaration of I are not visible.  Confirm against the complete
   df-core.c before editing.  */
425 df_add_problem (struct df_problem *problem)
427 struct dataflow *dflow;
430 /* First try to add the dependent problem. */
431 if (problem->dependent_problem)
432 df_add_problem (problem->dependent_problem);
434 /* Check to see if this problem has already been defined. If it
435 has, just return that instance, if not, add it to the end of the
437 dflow = df->problems_by_index[problem->id];
441 /* Make a new one and add it to the end. */
/* XCNEW zero-allocates, so fields not set below start cleared.  */
442 dflow = XCNEW (struct dataflow);
443 dflow->problem = problem;
444 dflow->computed = false;
445 dflow->solutions_dirty = true;
446 df->problems_by_index[dflow->problem->id] = dflow;
448 /* Keep the defined problems ordered by index. This solves the
449 problem that RI will use the information from UREC if UREC has
450 been defined, or from LIVE if LIVE is defined and otherwise LR.
451 However for this to work, the computation of RI must be pushed
452 after which ever of those problems is defined, but we do not
453 require any of those except for LR to have actually been
455 df->num_problems_defined++;
/* Insertion sort by problem id: shift larger-id problems one slot
   up, then drop the new dataflow into the opened hole.  */
456 for (i = df->num_problems_defined - 2; i >= 0; i--)
458 if (problem->id < df->problems_in_order[i]->problem->id)
459 df->problems_in_order[i+1] = df->problems_in_order[i];
462 df->problems_in_order[i+1] = dflow;
466 df->problems_in_order[0] = dflow;
470 /* Set the MASK flags in the DFLOW problem. The old flags are
471 returned. If a flag is not allowed to be changed this will fail if
472 checking is enabled. */
473 enum df_changeable_flags
474 df_set_flags (enum df_changeable_flags changeable_flags)
476 enum df_changeable_flags old_flags = df->changeable_flags;
/* OR the requested bits into the global DF instance's flags.  */
477 df->changeable_flags |= changeable_flags;
/* NOTE(review): the `return old_flags;' line and braces are not
   visible in this truncated listing -- confirm against the full file.  */
482 /* Clear the MASK flags in the DFLOW problem. The old flags are
483 returned. If a flag is not allowed to be changed this will fail if
484 checking is enabled. */
485 enum df_changeable_flags
486 df_clear_flags (enum df_changeable_flags changeable_flags)
488 enum df_changeable_flags old_flags = df->changeable_flags;
/* Mirror of df_set_flags: clear the requested bits.  */
489 df->changeable_flags &= ~changeable_flags;
/* NOTE(review): the `return old_flags;' line is missing from this
   truncated listing -- confirm against the full file.  */
494 /* Set the blocks that are to be considered for analysis. If this is
495 not called or is called with null, the entire function in
/* NOTE(review): heavily truncated listing -- braces, `else' arms, loop
   headers (e.g. the FOR_ALL_BB presumably driving bb->index below) and
   several declarations are missing.  Three cases are visible:
   subset -> new subset, whole function -> subset, and reset to the
   whole function.  Confirm structure against the full df-core.c.  */
499 df_set_blocks (bitmap blocks)
504 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
505 if (df->blocks_to_analyze)
507 /* This block is called to change the focus from one subset
510 bitmap diff = BITMAP_ALLOC (&df_bitmap_obstack);
/* DIFF = blocks previously analyzed but no longer requested.  */
511 bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
520 unsigned int bb_index;
/* Free per-block info for blocks that dropped out of the set.  */
522 EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
524 basic_block bb = BASIC_BLOCK (bb_index);
527 void *bb_info = df_get_bb_info (dflow, bb_index);
530 dflow->problem->free_bb_fun (bb, bb_info);
531 df_set_bb_info (dflow, bb_index, NULL);
542 /* This block of code is executed to change the focus from
543 the entire function to a subset. */
544 bitmap blocks_to_reset = NULL;
546 for (p = 0; p < df->num_problems_defined; p++)
548 struct dataflow *dflow = df->problems_in_order[p];
549 if (dflow->optional_p && dflow->problem->reset_fun)
551 if (!blocks_to_reset)
/* Lazily build the set of all block indices the first time a
   problem needs resetting.  */
555 BITMAP_ALLOC (&df_bitmap_obstack);
558 bitmap_set_bit (blocks_to_reset, bb->index);
561 dflow->problem->reset_fun (blocks_to_reset);
565 BITMAP_FREE (blocks_to_reset);
/* Record the requested subset on the DF instance.  */
567 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
569 bitmap_copy (df->blocks_to_analyze, blocks);
570 df->analyze_subset = true;
574 /* This block is executed to reset the focus to the entire
577 fprintf (dump_file, "clearing blocks_to_analyze\n");
578 if (df->blocks_to_analyze)
580 BITMAP_FREE (df->blocks_to_analyze);
581 df->blocks_to_analyze = NULL;
583 df->analyze_subset = false;
586 /* Setting the blocks causes the refs to be unorganized since only
587 the refs in the blocks are seen. */
588 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
589 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
590 df_mark_solutions_dirty ();
594 /* Delete a DFLOW problem (and any problems that depend on this
/* NOTE(review): truncated -- return type, braces, declarations of I/J,
   and the early return for permanent/NULL problems are not visible.  */
598 df_remove_problem (struct dataflow *dflow)
600 struct df_problem *problem;
606 problem = dflow->problem;
607 gcc_assert (problem->remove_problem_fun);
609 /* Delete any problems that depended on this problem first. */
/* Recursive: removing a dependent may itself remove its dependents.  */
610 for (i = 0; i < df->num_problems_defined; i++)
611 if (df->problems_in_order[i]->problem->dependent_problem == problem)
612 df_remove_problem (df->problems_in_order[i]);
614 /* Now remove this problem. */
615 for (i = 0; i < df->num_problems_defined; i++)
616 if (df->problems_in_order[i] == dflow)
/* Compact the ordered array over the removed slot.  */
619 for (j = i + 1; j < df->num_problems_defined; j++)
620 df->problems_in_order[j-1] = df->problems_in_order[j];
621 df->problems_in_order[j] = NULL;
622 df->num_problems_defined--;
/* Let the problem release its own data, then clear the index entry.  */
626 (problem->remove_problem_fun) ();
627 df->problems_by_index[problem->id] = NULL;
631 /* Remove all of the problems that are not permanent. Scanning, LR
632 and (at -O2 or higher) LIVE are permanent, the rest are removable.
633 Also clear all of the changeable_flags. */
/* NOTE(review): truncated -- braces, the declarations of I and
   REMOVED, the `removed++' increment, and the compaction of
   problems_in_order are not visible in this listing.  */
636 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
641 #ifdef ENABLE_DF_CHECKING
642 enum df_changeable_flags saved_flags;
/* Drop the lazily-built def/use ref tables; they rot across passes.  */
648 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
649 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
651 #ifdef ENABLE_DF_CHECKING
652 saved_flags = df->changeable_flags;
/* Remove every optional problem; permanent ones stay.  */
655 for (i = 0; i < df->num_problems_defined; i++)
657 struct dataflow *dflow = df->problems_in_order[i];
658 struct df_problem *problem = dflow->problem;
660 if (dflow->optional_p)
662 gcc_assert (problem->remove_problem_fun);
663 (problem->remove_problem_fun) ();
664 df->problems_in_order[i] = NULL;
665 df->problems_by_index[problem->id] = NULL;
669 df->num_problems_defined -= removed;
671 /* Clear all of the flags. */
672 df->changeable_flags = 0;
/* Flush any rescans that were deferred during the pass.  */
673 df_process_deferred_rescans ();
675 /* Set the focus back to the whole function. */
676 if (df->blocks_to_analyze)
678 BITMAP_FREE (df->blocks_to_analyze);
679 df->blocks_to_analyze = NULL;
680 df_mark_solutions_dirty ();
681 df->analyze_subset = false;
684 #ifdef ENABLE_DF_CHECKING
685 /* Verification will fail in DF_NO_INSN_RESCAN. */
686 if (!(saved_flags & DF_NO_INSN_RESCAN))
688 df_lr_verify_transfer_functions ();
690 df_live_verify_transfer_functions ();
698 #ifdef ENABLE_CHECKING
/* Request full verification on the next df_analyze.  */
700 df->changeable_flags |= DF_VERIFY_SCHEDULED;
705 /* Set up the dataflow instance for the entire back end. */
/* NOTE(review): truncated -- return type, braces, and the conditions
   guarding df_live_add_problem and the two df_compute_regs_ever_live
   calls (presumably optimize/reload_completed tests) are missing.  */
708 rest_of_handle_df_initialize (void)
/* Allocate and zero the single global df instance.  */
711 df = XCNEW (struct df);
712 df->changeable_flags = 0;
714 bitmap_obstack_initialize (&df_bitmap_obstack);
716 /* Set this to a conservative value. Stack_ptr_mod will compute it
718 current_function_sp_is_unchanging = 0;
720 df_scan_add_problem ();
721 df_scan_alloc (NULL);
723 /* These three problems are permanent. */
724 df_lr_add_problem ();
726 df_live_add_problem ();
/* Precompute forward and inverted postorders of the CFG.  */
728 df->postorder = XNEWVEC (int, last_basic_block);
729 df->postorder_inverted = XNEWVEC (int, last_basic_block);
730 df->n_blocks = post_order_compute (df->postorder, true, true);
731 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
732 gcc_assert (df->n_blocks == df->n_blocks_inverted);
734 df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
735 memset (df->hard_regs_live_count, 0,
736 sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
739 /* After reload, some ports add certain bits to regs_ever_live so
740 this cannot be reset. */
741 df_compute_regs_ever_live (true);
743 df_compute_regs_ever_live (false);
/* Pass descriptor registering dataflow initialization when optimizing.
   NOTE(review): the initializer is truncated -- the RTL_PASS tag, name,
   gate and several other fields are missing from this listing.  */
755 struct rtl_opt_pass pass_df_initialize_opt =
761 rest_of_handle_df_initialize, /* execute */
764 0, /* static_pass_number */
766 0, /* properties_required */
767 0, /* properties_provided */
768 0, /* properties_destroyed */
769 0, /* todo_flags_start */
770 0 /* todo_flags_finish */
/* Fragment of gate_no_opt (its definition line is missing above):
   run only when not optimizing.  */
778 return optimize == 0;
/* Pass descriptor registering dataflow initialization at -O0, gated on
   gate_no_opt.  NOTE(review): initializer truncated in this listing.  */
782 struct rtl_opt_pass pass_df_initialize_no_opt =
787 gate_no_opt, /* gate */
788 rest_of_handle_df_initialize, /* execute */
791 0, /* static_pass_number */
793 0, /* properties_required */
794 0, /* properties_provided */
795 0, /* properties_destroyed */
796 0, /* todo_flags_start */
797 0 /* todo_flags_finish */
802 /* Free all the dataflow info and the DF structure. This should be
803 called from the df_finish macro which also NULLs the parm. */
/* NOTE(review): truncated -- return type, braces, declaration of I,
   and the final free of DF itself are not visible here.  */
806 rest_of_handle_df_finish (void)
/* Tear down each registered problem via its free hook.  */
812 for (i = 0; i < df->num_problems_defined; i++)
814 struct dataflow *dflow = df->problems_in_order[i];
815 dflow->problem->free_fun ();
819 free (df->postorder);
820 if (df->postorder_inverted)
821 free (df->postorder_inverted);
822 free (df->hard_regs_live_count);
/* Release every bitmap allocated on the df obstack in one shot.  */
826 bitmap_obstack_release (&df_bitmap_obstack);
/* Pass descriptor that frees all dataflow state at the end of the RTL
   pipeline.  NOTE(review): initializer truncated in this listing.  */
831 struct rtl_opt_pass pass_df_finish =
835 "dfinish", /* name */
837 rest_of_handle_df_finish, /* execute */
840 0, /* static_pass_number */
842 0, /* properties_required */
843 0, /* properties_provided */
844 0, /* properties_destroyed */
845 0, /* todo_flags_start */
846 0 /* todo_flags_finish */
854 /*----------------------------------------------------------------------------
855 The general data flow analysis engine.
856 ----------------------------------------------------------------------------*/
859 /* Helper function for df_worklist_dataflow.
860 Propagate the dataflow forward.
861 Given a BB_INDEX, do the dataflow propagation
862 and set bits on for successors in PENDING
863 if the out set of the dataflow has changed. */
/* NOTE(review): truncated -- return type, the BB_INDEX and
   PENDING/CONSIDERED parameter lines, braces, and the edge-iterator
   declarations are missing from this listing.  */
866 df_worklist_propagate_forward (struct dataflow *dataflow,
868 unsigned *bbindex_to_postorder,
874 basic_block bb = BASIC_BLOCK (bb_index);
876 /* Calculate <conf_op> of incoming edges. */
/* Confluence over predecessors that are in the considered set;
   otherwise fall back to the problem's boundary function.  */
877 if (EDGE_COUNT (bb->preds) > 0)
878 FOR_EACH_EDGE (e, ei, bb->preds)
880 if (TEST_BIT (considered, e->src->index))
881 dataflow->problem->con_fun_n (e);
883 else if (dataflow->problem->con_fun_0)
884 dataflow->problem->con_fun_0 (bb);
/* trans_fun returns nonzero when the block's OUT set changed.  */
886 if (dataflow->problem->trans_fun (bb_index))
888 /* The out set of this block has changed.
889 Propagate to the outgoing blocks. */
890 FOR_EACH_EDGE (e, ei, bb->succs)
892 unsigned ob_index = e->dest->index;
894 if (TEST_BIT (considered, ob_index))
895 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
901 /* Helper function for df_worklist_dataflow.
902 Propagate the dataflow backward. */
/* Mirror of df_worklist_propagate_forward with the edge directions
   swapped: confluence over successors, requeue predecessors.
   NOTE(review): same truncation as the forward variant -- parameter
   lines, braces and iterator declarations are missing here.  */
905 df_worklist_propagate_backward (struct dataflow *dataflow,
907 unsigned *bbindex_to_postorder,
913 basic_block bb = BASIC_BLOCK (bb_index);
915 /* Calculate <conf_op> of incoming edges. */
916 if (EDGE_COUNT (bb->succs) > 0)
917 FOR_EACH_EDGE (e, ei, bb->succs)
919 if (TEST_BIT (considered, e->dest->index))
920 dataflow->problem->con_fun_n (e);
922 else if (dataflow->problem->con_fun_0)
923 dataflow->problem->con_fun_0 (bb);
925 if (dataflow->problem->trans_fun (bb_index))
927 /* The out set of this block has changed.
928 Propagate to the outgoing blocks. */
929 FOR_EACH_EDGE (e, ei, bb->preds)
931 unsigned ob_index = e->src->index;
933 if (TEST_BIT (considered, ob_index))
934 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
941 /* This will free "pending". */
/* Over-eager worklist solver: repeatedly pops the lowest set bit of
   PENDING (i.e. the earliest block in reverse postorder) and propagates
   it, until PENDING drains.  NOTE(review): truncated -- return type,
   the PENDING/CONSIDERED parameters, braces, and the declarations of
   INDEX, BB_INDEX, COUNT and the `else' before the backward call are
   missing from this listing.  */
943 df_worklist_dataflow_overeager (struct dataflow *dataflow,
946 int *blocks_in_postorder,
947 unsigned *bbindex_to_postorder)
949 enum df_flow_dir dir = dataflow->problem->dir;
952 while (!bitmap_empty_p (pending))
958 index = bitmap_first_set_bit (pending);
959 bitmap_clear_bit (pending, index);
/* Translate the postorder position back to a bb index.  */
961 bb_index = blocks_in_postorder[index];
963 if (dir == DF_FORWARD)
964 df_worklist_propagate_forward (dataflow, bb_index,
965 bbindex_to_postorder,
966 pending, considered);
968 df_worklist_propagate_backward (dataflow, bb_index,
969 bbindex_to_postorder,
970 pending, considered);
973 BITMAP_FREE (pending);
975 /* Dump statistics. */
977 fprintf (dump_file, "df_worklist_dataflow_overeager:"
978 "n_basic_blocks %d n_edges %d"
979 " count %d (%5.2g)\n",
980 n_basic_blocks, n_edges,
981 count, count / (float)n_basic_blocks);
/* Double-queue worklist solver: WORKLIST holds the current iteration's
   blocks, PENDING collects blocks re-queued for the next iteration;
   the two bitmaps are swapped at the top of each outer loop.
   NOTE(review): truncated -- return type, the PENDING/CONSIDERED
   parameters, braces, the other half of the pending/worklist swap, the
   inner do/while header, and the declarations of INDEX, BB_INDEX and
   DCOUNT are missing from this listing.  */
985 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
988 int *blocks_in_postorder,
989 unsigned *bbindex_to_postorder)
991 enum df_flow_dir dir = dataflow->problem->dir;
993 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
995 /* Double-queueing. Worklist is for the current iteration,
996 and pending is for the next. */
997 while (!bitmap_empty_p (pending))
999 /* Swap pending and worklist. */
1000 bitmap temp = worklist;
/* Process blocks in reverse-postorder within the iteration.  */
1010 index = bitmap_first_set_bit (worklist);
1011 bitmap_clear_bit (worklist, index);
1013 bb_index = blocks_in_postorder[index];
1015 if (dir == DF_FORWARD)
1016 df_worklist_propagate_forward (dataflow, bb_index,
1017 bbindex_to_postorder,
1018 pending, considered);
1020 df_worklist_propagate_backward (dataflow, bb_index,
1021 bbindex_to_postorder,
1022 pending, considered);
1024 while (!bitmap_empty_p (worklist));
1027 BITMAP_FREE (worklist);
1028 BITMAP_FREE (pending);
1030 /* Dump statistics. */
1032 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1033 "n_basic_blocks %d n_edges %d"
1034 " count %d (%5.2g)\n",
1035 n_basic_blocks, n_edges,
1036 dcount, dcount / (float)n_basic_blocks);
1039 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
1040 with "n"-th bit representing the n-th block in the reverse-postorder order.
1041 This is so-called over-eager algorithm where it propagates
1042 changes on demand. This algorithm may visit blocks more than
1043 iterative method if there are deeply nested loops.
1044 Worklist algorithm works better than iterative algorithm
1045 for CFGs with no nested loops.
1046 In practice, the measurement shows worklist algorithm beats
1047 iterative algorithm by some margin overall.
1048 Note that this is slightly different from the traditional textbook worklist solver,
1049 in that the worklist is effectively sorted by the reverse postorder.
1050 For CFGs with no nested loops, this is optimal.
1052 While the overeager algorithm works well for typical inputs,
1053 it can degenerate into excessive iteration given CFGs with deep loop nests
1054 and unstructured loops. To cap the excessive iteration in such cases,
1055 we switch to double-queueing when the original algorithm seems to
/* Entry point of the worklist-based dataflow solver.  Builds the
   bb-index -> reverse-postorder mapping and the CONSIDERED set, seeds
   the PENDING worklist with all blocks, runs the problem's init_fun,
   then dispatches to the double-queue or overeager variant based on a
   connectivity heuristic.  Frees the per-call scratch structures.  */
1060 df_worklist_dataflow (struct dataflow *dataflow,
1061 bitmap blocks_to_consider,
1062 int *blocks_in_postorder,
1065 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1066 sbitmap considered = sbitmap_alloc (last_basic_block);
1068 unsigned int *bbindex_to_postorder;
1071 enum df_flow_dir dir = dataflow->problem->dir;
1073 gcc_assert (dir != DF_NONE);
1075 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1076 bbindex_to_postorder =
1077 (unsigned int *)xmalloc (last_basic_block * sizeof (unsigned int));
1079 /* Initialize the array to an out-of-bound value. */
1080 for (i = 0; i < last_basic_block; i++)
1081 bbindex_to_postorder[i] = last_basic_block;
1083 /* Initialize the considered map. */
1084 sbitmap_zero (considered);
1085 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1087 SET_BIT (considered, index);
1090 /* Initialize the mapping of block index to postorder. */
1091 for (i = 0; i < n_blocks; i++)
1093 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1094 /* Add all blocks to the worklist. */
1095 bitmap_set_bit (pending, i);
1098 /* Initialize the problem. */
1099 if (dataflow->problem->init_fun)
1100 dataflow->problem->init_fun (blocks_to_consider);
1102 /* Solve it. Determine the solving algorithm
1103 based on a simple heuristic. */
1104 if (n_edges > PARAM_VALUE (PARAM_DF_DOUBLE_QUEUE_THRESHOLD_FACTOR)
1107 /* High average connectivity, meaning dense graph
1108 with more likely deep nested loops
1109 or unstructured loops. */
1110 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1111 blocks_in_postorder,
1112 bbindex_to_postorder);
1116 /* Most inputs fall into this case
1117 with relatively flat or structured CFG. */
1118 df_worklist_dataflow_overeager (dataflow, pending, considered,
1119 blocks_in_postorder,
1120 bbindex_to_postorder);
/* PENDING is freed by the callee; free the remaining scratch here.  */
1123 sbitmap_free (considered);
1124 free (bbindex_to_postorder);
1128 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1129 the order of the remaining entries. Returns the length of the resulting
/* In-place stable filter: LAST tracks the write position for entries
   of LIST whose block index is present in BLOCKS.  */
1133 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1137 for (act = 0, last = 0; act < len; act++)
1138 if (bitmap_bit_p (blocks, list[act]))
1139 list[last++] = list[act];
1145 /* Execute dataflow analysis on a single dataflow problem.
1147 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1148 examined or will be computed. For calls from DF_ANALYZE, this is
1149 the set of blocks that has been passed to DF_SET_BLOCKS.
/* Each phase below is optional; a problem supplies only the hooks it
   needs.  The whole run is bracketed by the problem's timevar, and by
   verify hooks when ENABLE_DF_CHECKING is defined.  */
1153 df_analyze_problem (struct dataflow *dflow,
1154 bitmap blocks_to_consider,
1155 int *postorder, int n_blocks)
1157 timevar_push (dflow->problem->tv_id);
1159 #ifdef ENABLE_DF_CHECKING
1160 if (dflow->problem->verify_start_fun)
1161 dflow->problem->verify_start_fun ();
1164 /* (Re)Allocate the datastructures necessary to solve the problem. */
1165 if (dflow->problem->alloc_fun)
1166 dflow->problem->alloc_fun (blocks_to_consider);
1168 /* Set up the problem and compute the local information. */
1169 if (dflow->problem->local_compute_fun)
1170 dflow->problem->local_compute_fun (blocks_to_consider);
1172 /* Solve the equations. */
1173 if (dflow->problem->dataflow_fun)
1174 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1175 postorder, n_blocks);
1177 /* Massage the solution. */
1178 if (dflow->problem->finalize_fun)
1179 dflow->problem->finalize_fun (blocks_to_consider);
1181 #ifdef ENABLE_DF_CHECKING
1182 if (dflow->problem->verify_end_fun)
1183 dflow->problem->verify_end_fun ();
1186 timevar_pop (dflow->problem->tv_id);
1188 dflow->computed = true;
1192 /* Analyze dataflow info for the basic blocks specified by the bitmap
1193 BLOCKS, or for the whole CFG if BLOCKS is zero. */
/* NOTE(review): the function signature line is missing from this
   excerpt; this is the body of df_analyze.  It recomputes both
   postorders, prunes them to the analysis subset if one was set via
   df_set_blocks, then solves every dirty problem (skipping DF_SCAN),
   feeding forward problems the inverted postorder and backward
   problems the normal one.  */
1198 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1203 free (df->postorder);
1204 if (df->postorder_inverted)
1205 free (df->postorder_inverted);
1206 df->postorder = XNEWVEC (int, last_basic_block);
1207 df->postorder_inverted = XNEWVEC (int, last_basic_block);
1208 df->n_blocks = post_order_compute (df->postorder, true, true);
1209 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1211 /* These should be the same. */
1212 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1214 /* We need to do this before the df_verify_all because this is
1215 not kept incrementally up to date. */
1216 df_compute_regs_ever_live (false);
1217 df_process_deferred_rescans ();
1220 fprintf (dump_file, "df_analyze called\n");
1222 #ifndef ENABLE_DF_CHECKING
1223 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1227 for (i = 0; i < df->n_blocks; i++)
1228 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1230 #ifdef ENABLE_CHECKING
1231 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1233 for (i = 0; i < df->n_blocks_inverted; i++)
1234 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1237 /* Make sure that we have pruned any unreachable blocks from these
1239 if (df->analyze_subset)
1242 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1243 df->n_blocks = df_prune_to_subcfg (df->postorder,
1244 df->n_blocks, df->blocks_to_analyze);
1245 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1246 df->n_blocks_inverted,
1247 df->blocks_to_analyze);
1248 BITMAP_FREE (current_all_blocks);
/* No subset: analyze every reachable block; ownership of
   CURRENT_ALL_BLOCKS transfers to df->blocks_to_analyze.  */
1253 df->blocks_to_analyze = current_all_blocks;
1254 current_all_blocks = NULL;
1257 /* Skip over the DF_SCAN problem. */
1258 for (i = 1; i < df->num_problems_defined; i++)
1260 struct dataflow *dflow = df->problems_in_order[i];
1261 if (dflow->solutions_dirty)
1263 if (dflow->problem->dir == DF_FORWARD)
1264 df_analyze_problem (dflow,
1265 df->blocks_to_analyze,
1266 df->postorder_inverted,
1267 df->n_blocks_inverted);
1269 df_analyze_problem (dflow,
1270 df->blocks_to_analyze,
1278 BITMAP_FREE (df->blocks_to_analyze);
1279 df->blocks_to_analyze = NULL;
1283 df_set_clean_cfg ();
1288 /* Return the number of basic blocks from the last call to df_analyze. */
/* Forward problems iterate the inverted postorder, so they get the
   inverted count; backward problems get the normal one.  */
1291 df_get_n_blocks (enum df_flow_dir dir)
1293 gcc_assert (dir != DF_NONE);
1295 if (dir == DF_FORWARD)
1297 gcc_assert (df->postorder_inverted);
1298 return df->n_blocks_inverted;
1301 gcc_assert (df->postorder);
1302 return df->n_blocks;
1306 /* Return a pointer to the array of basic blocks in the reverse postorder.
1307 Depending on the direction of the dataflow problem,
1308 it returns either the usual reverse postorder array
1309 or the reverse postorder of inverted traversal. */
/* Companion of df_get_n_blocks; both must be queried with the same
   direction to get a consistent array/length pair.  */
1311 df_get_postorder (enum df_flow_dir dir)
1313 gcc_assert (dir != DF_NONE);
1315 if (dir == DF_FORWARD)
1317 gcc_assert (df->postorder_inverted);
1318 return df->postorder_inverted;
1320 gcc_assert (df->postorder);
1321 return df->postorder;
/* Statically allocated problem/dataflow pair reused by every call to
   df_simple_dataflow below — NOT reentrant.  */
1324 static struct df_problem user_problem;
1325 static struct dataflow user_dflow;
1327 /* Interface for calling iterative dataflow with user defined
1328 confluence and transfer functions. All that is necessary is to
1329 supply DIR, a direction, CONF_FUN_0, a confluence function for
1330 blocks with no logical preds (or NULL), CONF_FUN_N, the normal
1331 confluence function, TRANS_FUN, the basic block transfer function,
1332 and BLOCKS, the set of blocks to examine, POSTORDER the blocks in
1333 postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1336 df_simple_dataflow (enum df_flow_dir dir,
1337 df_init_function init_fun,
1338 df_confluence_function_0 con_fun_0,
1339 df_confluence_function_n con_fun_n,
1340 df_transfer_function trans_fun,
1341 bitmap blocks, int * postorder, int n_blocks)
/* Fill the scratch problem with just the user-supplied hooks; all
   other hooks stay NULL and are skipped by the solver.  */
1343 memset (&user_problem, 0, sizeof (struct df_problem));
1344 user_problem.dir = dir;
1345 user_problem.init_fun = init_fun;
1346 user_problem.con_fun_0 = con_fun_0;
1347 user_problem.con_fun_n = con_fun_n;
1348 user_problem.trans_fun = trans_fun;
1349 user_dflow.problem = &user_problem;
1350 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1355 /*----------------------------------------------------------------------------
1356 Functions to support limited incremental change.
1357 ----------------------------------------------------------------------------*/
1360 /* Get basic block info. */
/* Returns NULL when the problem has no block_info vector or INDEX is
   beyond its current size.  NOTE(review): the cast to
   df_scan_bb_info looks scan-specific — verify against the header's
   declared return type.  */
1363 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1365 if (dflow->block_info == NULL)
1367 if (index >= dflow->block_info_size)
1369 return (struct df_scan_bb_info *) dflow->block_info[index];
1373 /* Set basic block info. */
/* Caller must have grown block_info (df_grow_bb_info) so INDEX is
   in range; only the presence of the vector is asserted here.  */
1376 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1379 gcc_assert (dflow->block_info);
1380 dflow->block_info[index] = bb_info;
1384 /* Mark the solutions as being out of date. */
/* Starts at index 1 to skip the DF_SCAN problem, matching the
   iteration convention used elsewhere in this file.  */
1387 df_mark_solutions_dirty (void)
1392 for (p = 1; p < df->num_problems_defined; p++)
1393 df->problems_in_order[p]->solutions_dirty = true;
1398 /* Return true if BB needs its transfer functions recomputed. */
/* Consults only df_live's dirty bitmap here; presumably df_live is
   kept in sync with the other problems — verify against full source
   (this excerpt may have dropped an LR check).  */
1401 df_get_bb_dirty (basic_block bb)
1404 return bitmap_bit_p (df_live->out_of_date_transfer_functions, bb->index);
1410 /* Mark BB as needing its transfer functions as being out of
/* Sets BB's bit in every non-scan problem that tracks out-of-date
   transfer functions, then marks all solutions dirty.  */
1414 df_set_bb_dirty (basic_block bb)
1419 for (p = 1; p < df->num_problems_defined; p++)
1421 struct dataflow *dflow = df->problems_in_order[p];
1422 if (dflow->out_of_date_transfer_functions)
1423 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1425 df_mark_solutions_dirty ();
1430 /* Clear the dirty bits. This is called from places that delete
/* Inverse of df_set_bb_dirty for a single block; does NOT clear
   solutions_dirty on the problems.  */
1433 df_clear_bb_dirty (basic_block bb)
1436 for (p = 1; p < df->num_problems_defined; p++)
1438 struct dataflow *dflow = df->problems_in_order[p];
1439 if (dflow->out_of_date_transfer_functions)
1440 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1443 /* Called from the rtl_compact_blocks to reorganize the problems basic
/* After the CFG compacts block indices, remap every per-problem
   bitmap and block_info vector from old bb->index values to the new
   compacted indices.  ENTRY/EXIT never move.  */
1447 df_compact_blocks (void)
1451 void **problem_temps;
1452 int size = last_basic_block * sizeof (void *);
1453 bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
1454 problem_temps = xmalloc (size);
1456 for (p = 0; p < df->num_problems_defined; p++)
1458 struct dataflow *dflow = df->problems_in_order[p];
1460 /* Need to reorganize the out_of_date_transfer_functions for the
1462 if (dflow->out_of_date_transfer_functions)
1464 bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1465 bitmap_clear (dflow->out_of_date_transfer_functions);
1466 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1467 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1468 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1469 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
/* Re-set each remaining dirty bit at the block's new index I.  */
1471 i = NUM_FIXED_BLOCKS;
1474 if (bitmap_bit_p (tmp, bb->index))
1475 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1480 /* Now shuffle the block info for the problem. */
1481 if (dflow->problem->free_bb_fun)
1483 df_grow_bb_info (dflow);
1484 memcpy (problem_temps, dflow->block_info, size);
1486 /* Copy the bb info from the problem tmps to the proper
1487 place in the block_info vector. Null out the copied
1488 item. The entry and exit blocks never move. */
1489 i = NUM_FIXED_BLOCKS;
1492 df_set_bb_info (dflow, i, problem_temps[bb->index]);
1493 problem_temps[bb->index] = NULL;
1496 memset (dflow->block_info + i, 0,
1497 (last_basic_block - i) *sizeof (void *));
1499 /* Free any block infos that were not copied (and NULLed).
1500 These are from orphaned blocks. */
1501 for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
1503 basic_block bb = BASIC_BLOCK (i);
1504 if (problem_temps[i] && bb)
1505 dflow->problem->free_bb_fun
1506 (bb, problem_temps[i]);
1511 /* Shuffle the bits in the basic_block indexed arrays. */
1513 if (df->blocks_to_analyze)
/* NOTE(review): TMP is tested for ENTRY/EXIT before blocks_to_analyze
   is copied into it — this ordering looks wrong; lines are likely
   missing from this excerpt, verify against upstream df-core.c.  */
1515 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1516 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1517 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1518 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1519 bitmap_copy (tmp, df->blocks_to_analyze);
1520 bitmap_clear (df->blocks_to_analyze);
1521 i = NUM_FIXED_BLOCKS;
1524 if (bitmap_bit_p (tmp, bb->index))
1525 bitmap_set_bit (df->blocks_to_analyze, i);
1532 free (problem_temps);
/* Finally renumber the basic_block array itself and NULL the tail.  */
1534 i = NUM_FIXED_BLOCKS;
1537 SET_BASIC_BLOCK (i, bb);
1542 gcc_assert (i == n_basic_blocks);
1544 for (; i < last_basic_block; i++)
1545 SET_BASIC_BLOCK (i, NULL);
1548 if (!df_lr->solutions_dirty)
1549 df_set_clean_cfg ();
1554 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1555 block. There is no excuse for people to do this kind of thing. */
/* Moves NEW_BLOCK's per-problem info and CFG slot to OLD_INDEX
   (which must be empty), marks the moved block dirty, and vacates
   its old slot.  */
1558 df_bb_replace (int old_index, basic_block new_block)
1560 int new_block_index = new_block->index;
1564 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1567 gcc_assert (BASIC_BLOCK (old_index) == NULL);
1569 for (p = 0; p < df->num_problems_defined; p++)
1571 struct dataflow *dflow = df->problems_in_order[p];
1572 if (dflow->block_info)
1574 df_grow_bb_info (dflow);
1575 gcc_assert (df_get_bb_info (dflow, old_index) == NULL);
1576 df_set_bb_info (dflow, old_index,
1577 df_get_bb_info (dflow, new_block_index));
/* Clear dirty bits at the OLD position before re-dirtying at the
   new one, so no stale bit survives at new_block_index.  */
1581 df_clear_bb_dirty (new_block);
1582 SET_BASIC_BLOCK (old_index, new_block);
1583 new_block->index = old_index;
1584 df_set_bb_dirty (BASIC_BLOCK (old_index));
1585 SET_BASIC_BLOCK (new_block_index, NULL);
1589 /* Free all of the per basic block dataflow from all of the problems.
1590 This is typically called before a basic block is deleted and the
1591 problem will be reanalyzed. */
1594 df_bb_delete (int bb_index)
1596 basic_block bb = BASIC_BLOCK (bb_index);
1602 for (i = 0; i < df->num_problems_defined; i++)
1604 struct dataflow *dflow = df->problems_in_order[i];
1605 if (dflow->problem->free_bb_fun)
1607 void *bb_info = df_get_bb_info (dflow, bb_index);
1610 dflow->problem->free_bb_fun (bb, bb_info);
1611 df_set_bb_info (dflow, bb_index, NULL);
/* The block is gone: drop its dirty bits and invalidate solutions.  */
1615 df_clear_bb_dirty (bb);
1616 df_mark_solutions_dirty ();
1620 /* Verify that there is a place for everything and everything is in
1621 its place. This is too expensive to run after every pass in the
1622 mainline. However this is an excellent debugging tool if the
1623 dataflow information is not being updated properly. You can just
1624 sprinkle calls in until you find the place that is changing an
1625 underlying structure without calling the proper updating functions.
/* NOTE(review): the signature line is missing from this excerpt;
   this is the body of df_verify — it runs the LR (and, under
   DF_CHECKING, LIVE) transfer-function verifiers.  */
1632 #ifdef ENABLE_DF_CHECKING
1633 df_lr_verify_transfer_functions ();
1635 df_live_verify_transfer_functions ();
1641 /* Compute an array of ints that describes the cfg. This can be used
1642 to discover places where the cfg is modified by the appropriate
1643 calls have not been made to the keep df informed. The internals of
1644 this are unexciting, the key is that two instances of this can be
1645 compared to see if any changes have been made to the cfg. */
1648 df_compute_cfg_image (void)
/* Image layout: a size header, then for each block its index followed
   by the indices of its successor destinations.  */
1651 int size = 2 + (2 * n_basic_blocks);
1657 size += EDGE_COUNT (bb->succs);
1660 map = XNEWVEC (int, size);
1668 map[i++] = bb->index;
1669 FOR_EACH_EDGE (e, ei, bb->succs)
1670 map[i++] = e->dest->index;
/* Fingerprint of the CFG captured by df_set_clean_cfg; NULL until
   the first snapshot is taken.  */
1677 static int *saved_cfg = NULL;
1680 /* This function compares the saved version of the cfg with the
1681 current cfg and aborts if the two are identical. The function
1682 silently returns if the cfg has been marked as dirty or the two are
/* No-op when solutions are already dirty or no snapshot exists;
   otherwise asserts the current CFG image matches the snapshot.  */
1686 df_check_cfg_clean (void)
1693 if (df_lr->solutions_dirty)
1696 if (saved_cfg == NULL)
1699 new_map = df_compute_cfg_image ();
1700 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1705 /* This function builds a cfg fingerprint and squirrels it away in
/* Takes a fresh CFG snapshot for later comparison by
   df_check_cfg_clean.  NOTE(review): the free of the previous
   saved_cfg is not visible in this excerpt — verify upstream.  */
1709 df_set_clean_cfg (void)
1713 saved_cfg = df_compute_cfg_image ();
1716 #endif /* DF_DEBUG_CFG */
1717 /*----------------------------------------------------------------------------
1718 PUBLIC INTERFACES TO QUERY INFORMATION.
1719 ----------------------------------------------------------------------------*/
1722 /* Return first def of REGNO within BB. */
/* Walks BB's insns in forward order, scanning each insn's def
   records for a matching register number.  */
1725 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1728 struct df_ref **def_rec;
1731 FOR_BB_INSNS (bb, insn)
1736 uid = INSN_UID (insn);
1737 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1739 struct df_ref *def = *def_rec;
1740 if (DF_REF_REGNO (def) == regno)
1748 /* Return last def of REGNO within BB. */
/* Same scan as df_bb_regno_first_def_find but over the insns in
   reverse order, so the first hit is the last def.  */
1751 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1754 struct df_ref **def_rec;
1757 FOR_BB_INSNS_REVERSE (bb, insn)
1762 uid = INSN_UID (insn);
1763 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1765 struct df_ref *def = *def_rec;
1766 if (DF_REF_REGNO (def) == regno)
1774 /* Finds the reference corresponding to the definition of REG in INSN.
1775 DF is the dataflow object. */
/* REG may be a SUBREG; it is peeled to the inner REG before the
   rtx_equal_p comparison against each def's real register.  */
1778 df_find_def (rtx insn, rtx reg)
1781 struct df_ref **def_rec;
1783 if (GET_CODE (reg) == SUBREG)
1784 reg = SUBREG_REG (reg);
1785 gcc_assert (REG_P (reg));
1787 uid = INSN_UID (insn);
1788 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1790 struct df_ref *def = *def_rec;
1791 if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
1799 /* Return true if REG is defined in INSN, zero otherwise. */
/* Thin predicate wrapper around df_find_def.  */
1802 df_reg_defined (rtx insn, rtx reg)
1804 return df_find_def (insn, reg) != NULL;
1808 /* Finds the reference corresponding to the use of REG in INSN.
1809 DF is the dataflow object. */
/* Like df_find_def but over the use records; when DF_EQ_NOTES is
   enabled the REG_EQUAL/REG_EQUIV note uses are searched as well.  */
1812 df_find_use (rtx insn, rtx reg)
1815 struct df_ref **use_rec;
1817 if (GET_CODE (reg) == SUBREG)
1818 reg = SUBREG_REG (reg);
1819 gcc_assert (REG_P (reg));
1821 uid = INSN_UID (insn);
1822 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
1824 struct df_ref *use = *use_rec;
1825 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1828 if (df->changeable_flags & DF_EQ_NOTES)
1829 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
1831 struct df_ref *use = *use_rec;
1832 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1839 /* Return true if REG is referenced in INSN, zero otherwise. */
/* Thin predicate wrapper around df_find_use.  */
1842 df_reg_used (rtx insn, rtx reg)
1844 return df_find_use (insn, reg) != NULL;
1848 /*----------------------------------------------------------------------------
1849 Debugging and printing functions.
1850 ----------------------------------------------------------------------------*/
1853 /* Write information about registers and basic blocks into FILE.
1854 This is part of making a debugging dump. */
/* Prints each set register number, annotated with its hard-register
   name when below FIRST_PSEUDO_REGISTER; "(nil)" for a null set.  */
1857 df_print_regset (FILE *file, bitmap r)
1863 fputs (" (nil)", file);
1866 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
1868 fprintf (file, " %d", i);
1869 if (i < FIRST_PSEUDO_REGISTER)
1870 fprintf (file, " [%s]", reg_names[i]);
1873 fprintf (file, "\n");
1877 /* Write information about registers and basic blocks into FILE. The
1878 bitmap is in the form used by df_byte_lr. This is part of making a
/* Byte-granular variant: each register occupies a run of bits
   [start, start+len) in R; registers wider than one byte get their
   live byte offsets printed in parentheses.  */
1882 df_print_byte_regset (FILE *file, bitmap r)
1884 unsigned int max_reg = max_reg_num ();
1888 fputs (" (nil)", file);
1892 for (i = 0; i < max_reg; i++)
1894 unsigned int first = df_byte_lr_get_regno_start (i);
1895 unsigned int len = df_byte_lr_get_regno_len (i);
/* Determine whether any bit of this register's byte range is set.  */
1902 EXECUTE_IF_SET_IN_BITMAP (r, first, j, bi)
1904 found = j < first + len;
1909 const char * sep = "";
1910 fprintf (file, " %d", i);
1911 if (i < FIRST_PSEUDO_REGISTER)
1912 fprintf (file, " [%s]", reg_names[i]);
1913 fprintf (file, "(");
1914 EXECUTE_IF_SET_IN_BITMAP (r, first, j, bi)
1916 if (j > first + len - 1)
1918 fprintf (file, "%s%d", sep, j-first);
1921 fprintf (file, ")");
/* Single-byte registers: just print the number/name if live.  */
1926 if (bitmap_bit_p (r, first))
1928 fprintf (file, " %d", i);
1929 if (i < FIRST_PSEUDO_REGISTER)
1930 fprintf (file, " [%s]", reg_names[i]);
1936 fprintf (file, "\n");
1940 /* Dump dataflow info. */
/* Full dump: header once, then per-block index, top info, and
   bottom info for every basic block.  */
1943 df_dump (FILE *file)
1946 df_dump_start (file);
1950 df_print_bb_index (bb, file);
1951 df_dump_top (bb, file);
1952 df_dump_bottom (bb, file);
1955 fprintf (file, "\n");
1959 /* Dump dataflow info for df->blocks_to_analyze. */
/* Like df_dump but restricted to the blocks in the current analysis
   subset; does nothing when no subset is set.  */
1962 df_dump_region (FILE *file)
1964 if (df->blocks_to_analyze)
1967 unsigned int bb_index;
1969 fprintf (file, "\n\nstarting region dump\n");
1970 df_dump_start (file);
1972 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
1974 basic_block bb = BASIC_BLOCK (bb_index);
1976 df_print_bb_index (bb, file);
1977 df_dump_top (bb, file);
1978 df_dump_bottom (bb, file);
1980 fprintf (file, "\n");
1987 /* Dump the introductory information for each problem defined. */
/* Prints the function name, a summary header, and then delegates to
   each computed problem's dump_start hook.  */
1990 df_dump_start (FILE *file)
1997 fprintf (file, "\n\n%s\n", current_function_name ());
1998 fprintf (file, "\nDataflow summary:\n");
1999 if (df->blocks_to_analyze)
2000 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2001 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2003 for (i = 0; i < df->num_problems_defined; i++)
2005 struct dataflow *dflow = df->problems_in_order[i];
2006 if (dflow->computed)
2008 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2016 /* Dump the top of the block information for BB. */
/* Delegates to each computed problem's dump_top hook for BB.  */
2019 df_dump_top (basic_block bb, FILE *file)
2026 for (i = 0; i < df->num_problems_defined; i++)
2028 struct dataflow *dflow = df->problems_in_order[i];
2029 if (dflow->computed)
2031 df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
2039 /* Dump the bottom of the block information for BB. */
/* Mirror of df_dump_top using each problem's dump_bottom hook.  */
2042 df_dump_bottom (basic_block bb, FILE *file)
2049 for (i = 0; i < df->num_problems_defined; i++)
2051 struct dataflow *dflow = df->problems_in_order[i];
2052 if (dflow->computed)
2054 df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
/* Dump a null-terminated vector of refs as "{ ... }"; each ref is
   tagged 'd' (def), 'e' (note/equiv use), or 'u' (use), and when
   FOLLOW_CHAIN is set its def-use/use-def chain is dumped too.  */
2063 df_refs_chain_dump (struct df_ref **ref_rec, bool follow_chain, FILE *file)
2065 fprintf (file, "{ ");
2068 struct df_ref *ref = *ref_rec;
2069 fprintf (file, "%c%d(%d)",
2070 DF_REF_REG_DEF_P (ref) ? 'd' : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2072 DF_REF_REGNO (ref));
2074 df_chain_dump (DF_REF_CHAIN (ref), file);
2077 fprintf (file, "}");
2081 /* Dump either a ref-def or reg-use chain. */
/* Walks the per-register next_reg linkage, printing each ref.  */
2084 df_regs_chain_dump (struct df_ref *ref, FILE *file)
2086 fprintf (file, "{ ");
2089 fprintf (file, "%c%d(%d) ",
2090 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2092 DF_REF_REGNO (ref));
2093 ref = ref->next_reg;
2095 fprintf (file, "}");
/* Dump a null-terminated vector of multiword-hardreg records: type
   (def/use) and the hard-register range covered.  */
2100 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
2104 fprintf (file, "mw %c r[%d..%d]\n",
2105 ((*mws)->type == DF_REF_REG_DEF) ? 'd' : 'u',
2106 (*mws)->start_regno, (*mws)->end_regno);
/* Dump everything df knows about insn UID: luid, then its defs,
   uses, note (eq) uses, and multiword records when present.  */
2113 df_insn_uid_debug (unsigned int uid,
2114 bool follow_chain, FILE *file)
2116 fprintf (file, "insn %d luid %d",
2117 uid, DF_INSN_UID_LUID (uid));
2119 if (DF_INSN_UID_DEFS (uid))
2121 fprintf (file, " defs ");
2122 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2125 if (DF_INSN_UID_USES (uid))
2127 fprintf (file, " uses ");
2128 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2131 if (DF_INSN_UID_EQ_USES (uid))
2133 fprintf (file, " eq uses ");
2134 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2137 if (DF_INSN_UID_MWS (uid))
2139 fprintf (file, " mws ");
2140 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2142 fprintf (file, "\n");
/* Convenience wrapper: dump df info for INSN by its UID.  */
2147 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2149 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
/* Dump INSN's defs/uses/eq-uses without following chains, prefixed
   with its containing block and luid.  */
2153 df_insn_debug_regno (rtx insn, FILE *file)
2155 unsigned int uid = INSN_UID(insn);
2157 fprintf (file, "insn %d bb %d luid %d defs ",
2158 uid, BLOCK_FOR_INSN (insn)->index, DF_INSN_LUID (insn));
2159 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), false, file);
2161 fprintf (file, " uses ");
2162 df_refs_chain_dump (DF_INSN_UID_USES (uid), false, file);
2164 fprintf (file, " eq_uses ");
2165 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), false, file);
2166 fprintf (file, "\n");
/* Dump the def, use, and eq-use chains anchored at register REGNO.  */
2170 df_regno_debug (unsigned int regno, FILE *file)
2172 fprintf (file, "reg %d defs ", regno);
2173 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2174 fprintf (file, " uses ");
2175 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2176 fprintf (file, " eq_uses ");
2177 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2178 fprintf (file, "\n");
/* Dump one ref in full: kind, register, block, insn uid (-1 if
   none), flags/type, location pointers, and its chain.  */
2183 df_ref_debug (struct df_ref *ref, FILE *file)
2185 fprintf (file, "%c%d ",
2186 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2188 fprintf (file, "reg %d bb %d insn %d flag 0x%x type 0x%x ",
2191 DF_REF_INSN (ref) ? INSN_UID (DF_REF_INSN (ref)) : -1,
2194 if (DF_REF_LOC (ref))
2195 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref), (void *)*DF_REF_LOC (ref));
2197 fprintf (file, "chain ");
2198 df_chain_dump (DF_REF_CHAIN (ref), file);
2199 fprintf (file, "\n");
2202 /* Functions for debugging from GDB. */
/* Each helper below is a one-call wrapper over a df_*_debug routine,
   dumping to stderr so it can be invoked interactively from GDB.
   NOTE(review): braces/blank lines between these tiny functions are
   missing from this excerpt.  */
2205 debug_df_insn (rtx insn)
2207 df_insn_debug (insn, true, stderr);
2213 debug_df_reg (rtx reg)
2215 df_regno_debug (REGNO (reg), stderr);
2220 debug_df_regno (unsigned int regno)
2222 df_regno_debug (regno, stderr);
2227 debug_df_ref (struct df_ref *ref)
2229 df_ref_debug (ref, stderr);
2234 debug_df_defno (unsigned int defno)
2236 df_ref_debug (DF_DEFS_GET (defno), stderr);
2241 debug_df_useno (unsigned int defno)
2243 df_ref_debug (DF_USES_GET (defno), stderr);
2248 debug_df_chain (struct df_link *link)
2250 df_chain_dump (link, stderr);
2251 fputc ('\n', stderr);