1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Originally contributed by Michael P. Hayes
5 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
6 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
7 and Kenneth Zadeck (zadeck@naturalbridge.com).
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 2, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
30 The files in this collection (df*.c,df.h) provide a general framework
31 for solving dataflow problems. The global dataflow is performed using
32 a good implementation of iterative dataflow analysis.
34 The file df-problems.c provides problem instance for the most common
35 dataflow problems: reaching defs, upward exposed uses, live variables,
36 uninitialized variables, def-use chains, and use-def chains. However,
37 the interface allows other dataflow problems to be defined as well.
39 Dataflow analysis is available in most of the rtl backend (the parts
40 between pass_df_initialize and pass_df_finish). It is quite likely
41 that these boundaries will be expanded in the future. The only
42 requirement is that there be a correct control flow graph.
44 There are three variations of the live variable problem that are
45 available whenever dataflow is available. The LR problem finds the
46 areas that can reach a use of a variable, the UR problems finds the
47 areas that can be reached from a definition of a variable. The LIVE
48 problem finds the intersection of these two areas.
50 There are several optional problems. These can be enabled when they
51 are needed and disabled when they are not needed.
53 Dataflow problems are generally solved in three layers. The bottom
54 layer is called scanning where a data structure is built for each rtl
55 insn that describes the set of defs and uses of that insn. Scanning
56 is generally kept up to date, i.e. as the insns changes, the scanned
57 version of that insn changes also. There are various mechanisms for
58 making this happen and are described in the INCREMENTAL SCANNING
61 In the middle layer, basic blocks are scanned to produce transfer
62 functions which describe the effects of that block on the global
63 dataflow solution. The transfer functions are only rebuilt if
64 some instruction within the block has changed.
66 The top layer is the dataflow solution itself. The dataflow solution
67 is computed by using an efficient iterative solver and the transfer
68 functions. The dataflow solution must be recomputed whenever the
69 control changes or if one of the transfer function changes.
74 Here is an example of using the dataflow routines.
76 df_[ru,rd,urec,ri,chain]_add_problem (flags);
78 df_set_blocks (blocks);
86 DF_[ru,rd,urec,ri,chain]_ADD_PROBLEM adds a problem, defined by an
87 instance to struct df_problem, to the set of problems solved in this
88 instance of df. All calls to add a problem for a given instance of df
89 must occur before the first call to DF_ANALYZE.
91 Problems can be dependent on other problems. For instance, solving
92 def-use or use-def chains is dependent on solving reaching
93 definitions. As long as these dependencies are listed in the problem
94 definition, the order of adding the problems is not material.
95 Otherwise, the problems will be solved in the order of calls to
96 df_add_problem. Note that it is not necessary to have a problem. In
97 that case, df will just be used to do the scanning.
101 DF_SET_BLOCKS is an optional call used to define a region of the
102 function on which the analysis will be performed. The normal case is
103 to analyze the entire function and no call to df_set_blocks is made.
104 DF_SET_BLOCKS only effects the blocks that are effected when computing
105 the transfer functions and final solution. The insn level information
106 is always kept up to date.
108 When a subset is given, the analysis behaves as if the function only
109 contains those blocks and any edges that occur directly between the
110 blocks in the set. Care should be taken to call df_set_blocks right
111 before the call to analyze in order to eliminate the possibility that
112 optimizations that reorder blocks invalidate the bitvector.
114 DF_ANALYZE causes all of the defined problems to be (re)solved. When
115 DF_ANALYZE completes, the IN and OUT sets for each basic block
116 contain the computed information. The DF_*_BB_INFO macros can be used
117 to access these bitvectors. All deferred rescans are done before
118 the transfer functions are recomputed.
120 DF_DUMP can then be called to dump the information produce to some
121 file. This calls DF_DUMP_START, to print the information that is not
122 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
123 for each block to print the basic block specific information. These parts
124 can all be called separately as part of a larger dump function.
127 DF_FINISH_PASS causes df_remove_problem to be called on all of the
128 optional problems. It also causes any insns whose scanning has been
129 deferred to be rescanned as well as clears all of the changeable flags.
130 Setting the pass manager TODO_df_finish flag causes this function to
131 be run. However, the pass manager will call df_finish_pass AFTER the
132 pass dumping has been done, so if you want to see the results of the
133 optional problems in the pass dumps, use the TODO flag rather than
134 calling the function yourself.
138 There are four ways of doing the incremental scanning:
140 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
141 df_bb_delete, df_insn_change_bb have been added to most of
142 the low level service functions that maintain the cfg and change
143 rtl. Calling any of these routines may cause some number of insns
146 For most modern rtl passes, this is certainly the easiest way to
147 manage rescanning the insns. This technique also has the advantage
148 that the scanning information is always correct and can be relied
149 upon even after changes have been made to the instructions. This
150 technique is contraindicated in several cases:
152 a) If def-use chains OR use-def chains (but not both) are built,
153 using this is SIMPLY WRONG. The problem is that when a ref is
154 deleted that is the target of an edge, there is not enough
155 information to efficiently find the source of the edge and
156 delete the edge. This leaves a dangling reference that may
159 b) If def-use chains AND use-def chains are built, this may
160 produce unexpected results. The problem is that the incremental
161 scanning of an insn does not know how to repair the chains that
162 point into an insn when the insn changes. So the incremental
163 scanning just deletes the chains that enter and exit the insn
164 being changed. The dangling reference issue in (a) is not a
165 problem here, but if the pass is depending on the chains being
166 maintained after insns have been modified, this technique will
167 not do the correct thing.
169 c) If the pass modifies insns several times, this incremental
170 updating may be expensive.
172 d) If the pass modifies all of the insns, as does register
173 allocation, it is simply better to rescan the entire function.
175 e) If the pass uses either non-standard or ancient techniques to
176 modify insns, automatic detection of the insns that need to be
177 rescanned may be impractical. Cse and regrename fall into this
180 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
181 df_insn_delete do not immediately change the insn but instead make
182 a note that the insn needs to be rescanned. The next call to
183 df_analyze, df_finish_pass, or df_process_deferred_rescans will
184 cause all of the pending rescans to be processed.
186 This is the technique of choice if either 1a, 1b, or 1c are issues
187 in the pass. In the case of 1a or 1b, a call to df_remove_problem
188 (df_chain) should be made before the next call to df_analyze or
189 df_process_deferred_rescans.
191 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
192 (This mode can be cleared by calling df_clear_flags
193 (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns to
196 3) Total rescanning - In this mode the rescanning is disabled.
197 However, the df information associated with deleted insn is delete
198 at the time the insn is deleted. At the end of the pass, a call
199 must be made to df_insn_rescan_all. This method is used by the
200 register allocator since it generally changes each insn multiple
201 times (once for each ref) and does not need to make use of the
202 updated scanning information.
204 It is also currently used by two older passes (cse, and regrename)
205 which change insns in hard to track ways. It is hoped that this
206 will be fixed soon since this it is expensive to rescan all of the
207 insns when only a small number of them have really changed.
209 4) Do it yourself - In this mechanism, the pass updates the insns
210 itself using the low level df primitives. Currently no pass does
211 this, but it has the advantage that it is quite efficient given
212 that the pass generally has exact knowledge of what it is changing.
216 Scanning produces a `struct df_ref' data structure (ref) is allocated
217 for every register reference (def or use) and this records the insn
218 and bb the ref is found within. The refs are linked together in
219 chains of uses and defs for each insn and for each register. Each ref
220 also has a chain field that links all the use refs for a def or all
221 the def refs for a use. This is used to create use-def or def-use
224 Different optimizations have different needs. Ultimately, only
225 register allocation and schedulers should be using the bitmaps
226 produced for the live register and uninitialized register problems.
227 The rest of the backend should be upgraded to using and maintaining
228 the linked information such as def use or use def chains.
233 While incremental bitmaps are not worthwhile to maintain, incremental
234 chains may be perfectly reasonable. The fastest way to build chains
235 from scratch or after significant modifications is to build reaching
236 definitions (RD) and build the chains from this.
238 However, general algorithms for maintaining use-def or def-use chains
239 are not practical. The amount of work to recompute the chain any
240 chain after an arbitrary change is large. However, with a modest
241 amount of work it is generally possible to have the application that
242 uses the chains keep them up to date. The high level knowledge of
243 what is really happening is essential to crafting efficient
244 incremental algorithms.
246 As for the bit vector problems, there is no interface to give a set of
247 blocks over which to resolve the iteration. In general, restarting a
248 dataflow iteration is difficult and expensive. Again, the best way to
249 keep the dataflow information up to date (if this is really what is
250 needed) is to formulate a problem specific solution.
252 There are fine grained calls for creating and deleting references from
253 instructions in df-scan.c. However, these are not currently connected
254 to the engine that resolves the dataflow equations.
259 The basic object is a DF_REF (reference) and this may either be a
260 DEF (definition) or a USE of a register.
262 These are linked into a variety of lists; namely reg-def, reg-use,
263 insn-def, insn-use, def-use, and use-def lists. For example, the
264 reg-def lists contain all the locations that define a given register
265 while the insn-use lists contain all the locations that use a
268 Note that the reg-def and reg-use chains are generally short for
269 pseudos and long for the hard registers.
273 1) The df insn information is kept in the insns array. This array is
276 2) Each insn has three sets of refs: They are linked into one of three
277 lists: the insn's defs list (accessed by the DF_INSN_DEFS or
278 DF_INSN_UID_DEFS macros), the insn's uses list (accessed by the
279 DF_INSN_USES or DF_INSN_UID_USES macros) or the insn's eq_uses list
280 (accessed by the DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
281 The latter list are the list of references in REG_EQUAL or
282 REG_EQUIV notes. These macros produce a ref (or NULL), the rest of
283 the list can be obtained by traversal of the NEXT_REF field
284 (accessed by the DF_REF_NEXT_REF macro.) There is no significance
285 to the ordering of the uses or refs in an instruction.
287 3) Each insn has a logical uid field (LUID). When properly set, this
288 is an integer that numbers each insn in the basic block, in order from
289 the start of the block. The numbers are only correct after a call to
290 df_analyze. They will rot after insns are added, deleted or moved
295 There are 4 ways to obtain access to refs:
297 1) References are divided into two categories, REAL and ARTIFICIAL.
299 REAL refs are associated with instructions.
301 ARTIFICIAL refs are associated with basic blocks. The heads of
302 these lists can be accessed by calling df_get_artificial_defs or
303 df_get_artificial_uses for the particular basic block.
305 Artificial defs and uses occur both at the beginning and ends of blocks.
307 For blocks that are at the destination of eh edges, the
308 artificial uses and defs occur at the beginning. The defs relate
309 to the registers specified in EH_RETURN_DATA_REGNO and the uses
310 relate to the registers specified in EH_USES. Logically these
311 defs and uses should really occur along the eh edge, but there is
312 no convenient way to do this. Artificial edges that occur at the
313 beginning of the block have the DF_REF_AT_TOP flag set.
315 Artificial uses occur at the end of all blocks. These arise from
316 the hard registers that are always live, such as the stack
317 register and are put there to keep the code from forgetting about
320 Artificial defs occur at the end of the entry block. These arise
321 from registers that are live at entry to the function.
323 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
324 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
326 All of the eq_uses, uses and defs associated with each pseudo or
327 hard register may be linked in a bidirectional chain. These are
328 called reg-use or reg_def chains. If the changeable flag
329 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
330 treated like uses. If it is not set they are ignored.
332 The first use, eq_use or def for a register can be obtained using
333 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
334 macros. Subsequent uses for the same regno can be obtained by
335 following the next_reg field of the ref. The number of elements in
336 each of the chains can be found by using the DF_REG_USE_COUNT,
337 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
339 In previous versions of this code, these chains were ordered. It
340 has not been practical to continue this practice.
342 3) If def-use or use-def chains are built, these can be traversed to
343 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
344 include the eq_uses. Otherwise these are ignored when building the
347 4) An array of all of the uses (and an array of all of the defs) can
349 be built. These arrays are indexed by the value in the id
350 structure. These arrays are only lazily kept up to date, and that
351 process can be expensive. To have these arrays built, call
352 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
353 has been set the array will contain the eq_uses. Otherwise these
354 are ignored when building the array and assigning the ids. Note
355 that the values in the id field of a ref may change across calls to
356 df_analyze or df_reorganize_defs or df_reorganize_uses.
358 If the only use of this array is to find all of the refs, it is
359 better to traverse all of the registers and then traverse all of
360 reg-use or reg-def chains.
364 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
365 both a use and a def. These are both marked read/write to show that they
366 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
367 will generate a use of reg 42 followed by a def of reg 42 (both marked
368 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
369 generates a use of reg 41 then a def of reg 41 (both marked read/write),
370 even though reg 41 is decremented before it is used for the memory
371 address in this second example.
373 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
374 for which the number of word_mode units covered by the outer mode is
375 smaller than that covered by the inner mode, invokes a read-modify-write.
376 operation. We generate both a use and a def and again mark them
379 Paradoxical subreg writes do not leave a trace of the old content, so they
380 are write-only operations.
386 #include "coretypes.h"
390 #include "insn-config.h"
392 #include "function.h"
395 #include "alloc-pool.h"
397 #include "hard-reg-set.h"
398 #include "basic-block.h"
403 #include "tree-pass.h"
/* Forward declarations for the per-problem basic-block info accessors
   defined later in this file.  */
405 static void *df_get_bb_info (struct dataflow *, unsigned int);
406 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
408 static void df_set_clean_cfg (void);
411 /* An obstack for bitmaps not related to specific dataflow problems.
412 This obstack should e.g. be used for bitmaps with a short life time
413 such as temporary bitmaps. */
415 bitmap_obstack df_bitmap_obstack;
418 /*----------------------------------------------------------------------------
419 Functions to create, destroy and manipulate an instance of df.
420 ----------------------------------------------------------------------------*/
424 /* Add PROBLEM (and any dependent problems) to the DF instance. */
/* NOTE(review): recurses on PROBLEM->dependent_problem first, so a
   problem's prerequisite is always registered before the problem
   itself.  Re-adding an already-defined problem is a no-op for that
   problem (the existing dataflow instance is reused).  */
427 df_add_problem (struct df_problem *problem)
429 struct dataflow *dflow;
432 /* First try to add the dependent problem. */
433 if (problem->dependent_problem)
434 df_add_problem (problem->dependent_problem);
436 /* Check to see if this problem has already been defined. If it
437 has, just return that instance, if not, add it to the end of the
439 dflow = df->problems_by_index[problem->id];
443 /* Make a new one and add it to the end. */
444 dflow = XCNEW (struct dataflow);
445 dflow->problem = problem;
446 dflow->computed = false;
447 dflow->solutions_dirty = true;
448 df->problems_by_index[dflow->problem->id] = dflow;
450 /* Keep the defined problems ordered by index. This solves the
451 problem that RI will use the information from UREC if UREC has
452 been defined, or from LIVE if LIVE is defined and otherwise LR.
453 However for this to work, the computation of RI must be pushed
454 after which ever of those problems is defined, but we do not
455 require any of those except for LR to have actually been
457 df->num_problems_defined++;
/* Insertion sort: shift higher-id problems up one slot until the
   correct position for the new problem is found.  */
458 for (i = df->num_problems_defined - 2; i >= 0; i--)
460 if (problem->id < df->problems_in_order[i]->problem->id)
461 df->problems_in_order[i+1] = df->problems_in_order[i];
464 df->problems_in_order[i+1] = dflow;
468 df->problems_in_order[0] = dflow;
472 /* Set CHANGEABLE_FLAGS bits in the global df instance. The old flags
473 are returned so the caller can later restore them (typically via
474 df_clear_flags). */
475 enum df_changeable_flags
476 df_set_flags (enum df_changeable_flags changeable_flags)
478 enum df_changeable_flags old_flags = df->changeable_flags;
479 df->changeable_flags |= changeable_flags;
484 /* Clear CHANGEABLE_FLAGS bits in the global df instance. The old
485 flags are returned so the caller can restore a previously saved
486 flag set. */
487 enum df_changeable_flags
488 df_clear_flags (enum df_changeable_flags changeable_flags)
490 enum df_changeable_flags old_flags = df->changeable_flags;
491 df->changeable_flags &= ~changeable_flags;
496 /* Set the blocks that are to be considered for analysis. If this is
497 not called or is called with null, the entire function in
501 df_set_blocks (bitmap blocks)
506 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
/* Case 1: a subset was already in effect.  Reset or free the per-block
   info of optional problems for blocks leaving the subset.  */
507 if (df->blocks_to_analyze)
510 bitmap diff = BITMAP_ALLOC (&df_bitmap_obstack);
/* DIFF = blocks in the old subset that are NOT in the new one.  */
511 bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
512 for (p = df->num_problems_defined - 1; p >= DF_FIRST_OPTIONAL_PROBLEM ;p--)
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_bb_fun)
520 unsigned int bb_index;
522 EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
524 basic_block bb = BASIC_BLOCK (bb_index);
527 void *bb_info = df_get_bb_info (dflow, bb_index);
530 dflow->problem->free_bb_fun (bb, bb_info);
531 df_set_bb_info (dflow, bb_index, NULL);
542 /* If we have not actually run scanning before, do not try
543 to clear anything. */
544 if (df_scan->problem_data)
546 bitmap blocks_to_reset = NULL;
548 for (p = df->num_problems_defined - 1; p >= DF_FIRST_OPTIONAL_PROBLEM ;p--)
550 struct dataflow *dflow = df->problems_in_order[p];
551 if (dflow->problem->reset_fun)
553 if (!blocks_to_reset)
/* Lazily build the set of all blocks the first time a problem
   with a reset_fun is seen.  */
557 BITMAP_ALLOC (&df_bitmap_obstack);
560 bitmap_set_bit (blocks_to_reset, bb->index);
563 dflow->problem->reset_fun (blocks_to_reset);
567 BITMAP_FREE (blocks_to_reset);
569 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
571 bitmap_copy (df->blocks_to_analyze, blocks);
572 df->analyze_subset = true;
/* Case 2: BLOCKS == NULL — revert to analyzing the whole function.  */
577 fprintf (dump_file, "clearing blocks to analyze\n");
578 if (df->blocks_to_analyze)
580 BITMAP_FREE (df->blocks_to_analyze);
581 df->blocks_to_analyze = NULL;
583 df->analyze_subset = false;
586 /* Setting the blocks causes the refs to be unorganized since only
587 the refs in the blocks are seen. */
588 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
589 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
590 df_mark_solutions_dirty ();
594 /* Delete a DFLOW problem (and any problems that depend on this
598 df_remove_problem (struct dataflow *dflow)
600 struct df_problem *problem;
607 problem = dflow->problem;
608 gcc_assert (problem->remove_problem_fun);
610 /* Normally only optional problems are removed, but during global,
611 we remove ur and live and replace it with urec. */
612 if (problem->id >= DF_FIRST_OPTIONAL_PROBLEM)
613 start = DF_FIRST_OPTIONAL_PROBLEM;
615 /* Delete any problems that depended on this problem first. */
/* NOTE(review): the recursive call mutates problems_in_order and
   num_problems_defined while this loop runs — looks intentional since
   removal only compacts entries at or after I, but verify no dependent
   can be skipped.  */
616 for (i = start; i < df->num_problems_defined; i++)
617 if (df->problems_in_order[i]->problem->dependent_problem == problem)
618 df_remove_problem (df->problems_in_order[i]);
620 /* Now remove this problem. */
621 for (i = start; i < df->num_problems_defined; i++)
622 if (df->problems_in_order[i] == dflow)
/* Compact the ordered array over the removed slot.  */
625 for (j = i + 1; j < df->num_problems_defined; j++)
626 df->problems_in_order[j-1] = df->problems_in_order[j];
627 df->problems_in_order[j] = NULL;
628 df->num_problems_defined--;
/* Let the problem release its own data, then drop it from the
   by-index table.  */
632 (problem->remove_problem_fun) ();
633 df->problems_by_index[problem->id] = NULL;
637 /* Remove all of the problems that are not permanent. Scanning, lr,
638 ur and live are permanent, the rest are removable. Also clear all
639 of the changeable_flags. */
642 df_finish_pass (void)
647 #ifdef ENABLE_CHECKING
648 enum df_changeable_flags saved_flags;
654 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
655 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
657 #ifdef ENABLE_CHECKING
/* Save the flags before they are cleared so the verification below
   can tell whether insn rescanning was suppressed this pass.  */
658 saved_flags = df->changeable_flags;
/* Remove every optional problem; permanent problems occupy the slots
   below DF_FIRST_OPTIONAL_PROBLEM and are left in place.  */
661 for (i = DF_FIRST_OPTIONAL_PROBLEM; i < df->num_problems_defined; i++)
663 struct dataflow *dflow = df->problems_in_order[i];
664 struct df_problem *problem = dflow->problem;
666 gcc_assert (problem->remove_problem_fun);
667 (problem->remove_problem_fun) ();
668 df->problems_in_order[i] = NULL;
669 df->problems_by_index[problem->id] = NULL;
672 df->num_problems_defined -= removed;
674 /* Clear all of the flags. */
675 df->changeable_flags = 0;
/* Flush any rescans that were deferred while DF_DEFER_INSN_RESCAN
   was in effect, now that the flags have been cleared.  */
676 df_process_deferred_rescans ();
678 /* Set the focus back to the whole function. */
679 if (df->blocks_to_analyze)
681 BITMAP_FREE (df->blocks_to_analyze);
682 df->blocks_to_analyze = NULL;
683 df_mark_solutions_dirty ();
684 df->analyze_subset = false;
687 #ifdef ENABLE_CHECKING
688 /* Verification will fail in DF_NO_INSN_RESCAN. */
689 if (!(saved_flags & DF_NO_INSN_RESCAN))
691 df_lr_verify_transfer_functions ();
693 df_live_verify_transfer_functions ();
703 /* Set up the dataflow instance for the entire back end. */
706 rest_of_handle_df_initialize (void)
709 df = XCNEW (struct df);
710 df->changeable_flags = 0;
712 bitmap_obstack_initialize (&df_bitmap_obstack);
714 /* Set this to a conservative value. Stack_ptr_mod will compute it
716 current_function_sp_is_unchanging = 0;
/* Scanning is itself modeled as a (permanent) problem and must come
   first; allocate its per-insn structures immediately.  */
718 df_scan_add_problem ();
719 df_scan_alloc (NULL);
721 /* These three problems are permanent. */
722 df_lr_add_problem ();
724 df_live_add_problem ();
/* Compute and cache the forward and inverted postorders of the CFG;
   both orders must visit the same number of blocks.  */
726 df->postorder = XNEWVEC (int, last_basic_block);
727 df->postorder_inverted = XNEWVEC (int, last_basic_block);
728 df->n_blocks = post_order_compute (df->postorder, true, true);
729 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
730 gcc_assert (df->n_blocks == df->n_blocks_inverted);
/* Per-hard-register live counts start at zero.  */
732 df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
733 memset (df->hard_regs_live_count, 0,
734 sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
737 /* After reload, some ports add certain bits to regs_ever_live so
738 this cannot be reset. */
739 df_compute_regs_ever_live (true);
741 df_compute_regs_ever_live (false);
753 struct tree_opt_pass pass_df_initialize_opt =
/* Pass descriptor that runs rest_of_handle_df_initialize at the start
   of the rtl passes when optimizing.  */
757 rest_of_handle_df_initialize, /* execute */
760 0, /* static_pass_number */
762 0, /* properties_required */
763 0, /* properties_provided */
764 0, /* properties_destroyed */
765 0, /* todo_flags_start */
766 0, /* todo_flags_finish */
774 return optimize == 0;
778 struct tree_opt_pass pass_df_initialize_no_opt =
/* Variant of the df-initialize pass gated by gate_no_opt, i.e. run
   only when not optimizing.  */
781 gate_no_opt, /* gate */
782 rest_of_handle_df_initialize, /* execute */
785 0, /* static_pass_number */
787 0, /* properties_required */
788 0, /* properties_provided */
789 0, /* properties_destroyed */
790 0, /* todo_flags_start */
791 0, /* todo_flags_finish */
796 /* Free all the dataflow info and the DF structure. This should be
797 called from the df_finish macro which also NULLs the parm. */
800 rest_of_handle_df_finish (void)
/* Let every defined problem (including scanning) free its data.  */
806 for (i = 0; i < df->num_problems_defined; i++)
808 struct dataflow *dflow = df->problems_in_order[i];
809 dflow->problem->free_fun ();
813 free (df->postorder);
/* NOTE(review): free (NULL) is a no-op, so the guard below is
   redundant but harmless.  */
814 if (df->postorder_inverted)
815 free (df->postorder_inverted);
816 free (df->hard_regs_live_count);
820 bitmap_obstack_release (&df_bitmap_obstack);
825 struct tree_opt_pass pass_df_finish =
/* Pass descriptor that tears down the df instance at the end of the
   rtl passes via rest_of_handle_df_finish.  */
827 "dfinish", /* name */
829 rest_of_handle_df_finish, /* execute */
832 0, /* static_pass_number */
834 0, /* properties_required */
835 0, /* properties_provided */
836 0, /* properties_destroyed */
837 0, /* todo_flags_start */
838 0, /* todo_flags_finish */
846 /*----------------------------------------------------------------------------
847 The general data flow analysis engine.
848 ----------------------------------------------------------------------------*/
851 /* Helper function for df_worklist_dataflow.
852 Propagate the dataflow forward.
853 Given a BB_INDEX, do the dataflow propagation
854 and set bits on for successors in PENDING
855 if the out set of the dataflow has changed. */
858 df_worklist_propagate_forward (struct dataflow *dataflow,
860 unsigned *bbindex_to_postorder,
866 basic_block bb = BASIC_BLOCK (bb_index);
868 /* Calculate <conf_op> of incoming edges. */
/* Only confluence over predecessors that are in the CONSIDERED set;
   edges from blocks outside the analyzed region are ignored.  */
869 if (EDGE_COUNT (bb->preds) > 0)
870 FOR_EACH_EDGE (e, ei, bb->preds)
872 if (TEST_BIT (considered, e->src->index))
873 dataflow->problem->con_fun_n (e);
875 else if (dataflow->problem->con_fun_0)
876 dataflow->problem->con_fun_0 (bb);
/* trans_fun returns true iff the block's out set changed.  */
878 if (dataflow->problem->trans_fun (bb_index))
880 /* The out set of this block has changed.
881 Propagate to the outgoing blocks. */
882 FOR_EACH_EDGE (e, ei, bb->succs)
884 unsigned ob_index = e->dest->index;
886 if (TEST_BIT (considered, ob_index))
887 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
893 /* Helper function for df_worklist_dataflow.
894 Propagate the dataflow backward. */
897 df_worklist_propagate_backward (struct dataflow *dataflow,
899 unsigned *bbindex_to_postorder,
905 basic_block bb = BASIC_BLOCK (bb_index);
907 /* Calculate <conf_op> of outgoing edges. */
908 if (EDGE_COUNT (bb->succs) > 0)
909 FOR_EACH_EDGE (e, ei, bb->succs)
911 if (TEST_BIT (considered, e->dest->index))
912 dataflow->problem->con_fun_n (e);
914 else if (dataflow->problem->con_fun_0)
915 dataflow->problem->con_fun_0 (bb);
917 if (dataflow->problem->trans_fun (bb_index))
919 /* The in set of this block has changed.
920 Propagate to the predecessor blocks. */
921 FOR_EACH_EDGE (e, ei, bb->preds)
923 unsigned ob_index = e->src->index;
925 if (TEST_BIT (considered, ob_index))
926 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
932 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
933 with "n"-th bit representing the n-th block in the reverse-postorder order.
934 This is so-called over-eager algorithm where it propagates
935 changes on demand. This algorithm may visit blocks more than
936 iterative method if there are deeply nested loops.
937 Worklist algorithm works better than iterative algorithm
938 for CFGs with no nested loops.
939 In practice, the measurement shows worklist algorithm beats
940 iterative algorithm by some margin overall.
941 Note that this is slightly different from the traditional textbook worklist solver,
942 in that the worklist is effectively sorted by the reverse postorder.
943 For CFGs with no nested loops, this is optimal. */
946 df_worklist_dataflow (struct dataflow *dataflow,
947 bitmap blocks_to_consider,
948 int *blocks_in_postorder,
951 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
952 sbitmap considered = sbitmap_alloc (last_basic_block);
954 unsigned int *bbindex_to_postorder;
957 enum df_flow_dir dir = dataflow->problem->dir;
/* A problem with no flow direction has nothing to iterate.  */
959 gcc_assert (dir != DF_NONE);
961 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
962 bbindex_to_postorder =
963 (unsigned int *)xmalloc (last_basic_block * sizeof (unsigned int));
965 /* Initialize the array to an out-of-bound value. */
966 for (i = 0; i < last_basic_block; i++)
967 bbindex_to_postorder[i] = last_basic_block;
969 /* Initialize the considered map. */
970 sbitmap_zero (considered);
971 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
973 SET_BIT (considered, index);
976 /* Initialize the mapping of block index to postorder. */
977 for (i = 0; i < n_blocks; i++)
979 bbindex_to_postorder[blocks_in_postorder[i]] = i;
980 /* Add all blocks to the worklist. */
981 bitmap_set_bit (pending, i);
984 if (dataflow->problem->init_fun)
985 dataflow->problem->init_fun (blocks_to_consider);
/* Pull the earliest pending block (in reverse postorder) each time;
   this is what makes the worklist effectively sorted.  */
987 while (!bitmap_empty_p (pending))
991 index = bitmap_first_set_bit (pending);
992 bitmap_clear_bit (pending, index);
994 bb_index = blocks_in_postorder[index];
996 if (dir == DF_FORWARD)
997 df_worklist_propagate_forward (dataflow, bb_index,
998 bbindex_to_postorder,
999 pending, considered);
1001 df_worklist_propagate_backward (dataflow, bb_index,
1002 bbindex_to_postorder,
1003 pending, considered);
/* Release the worklist and the index map.  */
1006 BITMAP_FREE (pending);
1007 sbitmap_free (considered);
1008 free (bbindex_to_postorder);
1012 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1013 the order of the remaining entries. Returns the length of the resulting
1017 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
/* In-place stable compaction: ACT scans every entry, LAST tracks the
   next write position for entries that survive the filter.  */
1021 for (act = 0, last = 0; act < len; act++)
1022 if (bitmap_bit_p (blocks, list[act]))
1023 list[last++] = list[act];
1029 /* Execute dataflow analysis on a single dataflow problem.
1031 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1032 examined or will be computed. For calls from DF_ANALYZE, this is
1033 the set of blocks that has been passed to DF_SET_BLOCKS.
1037 df_analyze_problem (struct dataflow *dflow,
1038 bitmap blocks_to_consider,
1039 int *postorder, int n_blocks)
/* The whole solve is wrapped in the problem's timevar so per-problem
   time shows up in -ftime-report.  */
1041 timevar_push (dflow->problem->tv_id);
1043 #ifdef ENABLE_CHECKING
1044 if (dflow->problem->verify_start_fun)
1045 dflow->problem->verify_start_fun ();
/* Each phase below is optional: a problem supplies only the hooks it
   needs and NULL for the rest.  */
1048 /* (Re)Allocate the datastructures necessary to solve the problem. */
1049 if (dflow->problem->alloc_fun)
1050 dflow->problem->alloc_fun (blocks_to_consider);
1052 /* Set up the problem and compute the local information. */
1053 if (dflow->problem->local_compute_fun)
1054 dflow->problem->local_compute_fun (blocks_to_consider);
1056 /* Solve the equations. */
1057 if (dflow->problem->dataflow_fun)
1058 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1059 postorder, n_blocks);
1061 /* Massage the solution. */
1062 if (dflow->problem->finalize_fun)
1063 dflow->problem->finalize_fun (blocks_to_consider);
1065 #ifdef ENABLE_CHECKING
1066 if (dflow->problem->verify_end_fun)
1067 dflow->problem->verify_end_fun ();
1070 timevar_pop (dflow->problem->tv_id);
/* Mark the problem as having a computed (dumpable) solution.  */
1072 dflow->computed = true;
1076 /* Analyze dataflow info for the basic blocks specified by the bitmap
1077 BLOCKS, or for the whole CFG if BLOCKS is zero. */
1082 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
/* Recompute both block orderings from scratch; any previous arrays are
   stale once the CFG may have changed.  */
1087 free (df->postorder);
1088 if (df->postorder_inverted)
1089 free (df->postorder_inverted);
1090 df->postorder = XNEWVEC (int, last_basic_block);
1091 df->postorder_inverted = XNEWVEC (int, last_basic_block);
1092 df->n_blocks = post_order_compute (df->postorder, true, true);
1093 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1095 /* These should be the same. */
1096 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1098 /* We need to do this before the df_verify_all because this is
1099 not kept incrementally up to date. */
1100 df_compute_regs_ever_live (false);
1101 df_process_deferred_rescans ();
1103 #ifdef ENABLE_CHECKING
1105 fprintf (dump_file, "df_analyze called\n");
1109 for (i = 0; i < df->n_blocks; i++)
1110 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1112 #ifdef ENABLE_CHECKING
1113 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1115 for (i = 0; i < df->n_blocks_inverted; i++)
1116 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1119 /* Make sure that we have pruned any unreachable blocks from these
1121 if (df->analyze_subset)
1124 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1125 df->n_blocks = df_prune_to_subcfg (df->postorder,
1126 df->n_blocks, df->blocks_to_analyze);
1127 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1128 df->n_blocks_inverted,
1129 df->blocks_to_analyze);
1130 BITMAP_FREE (current_all_blocks);
1135 df->blocks_to_analyze = current_all_blocks;
1136 current_all_blocks = NULL;
1139 /* Skip over the DF_SCAN problem. */
1140 for (i = 1; i < df->num_problems_defined; i++)
1142 struct dataflow *dflow = df->problems_in_order[i];
1143 if (dflow->solutions_dirty)
1145 if (dflow->problem->dir == DF_FORWARD)
1146 df_analyze_problem (dflow,
1147 df->blocks_to_analyze,
1148 df->postorder_inverted,
1149 df->n_blocks_inverted);
1151 df_analyze_problem (dflow,
1152 df->blocks_to_analyze,
1160 BITMAP_FREE (df->blocks_to_analyze);
1161 df->blocks_to_analyze = NULL;
1165 df_set_clean_cfg ();
1170 /* Return the number of basic blocks from the last call to df_analyze. */
1173 df_get_n_blocks (enum df_flow_dir dir)
1175 gcc_assert (dir != DF_NONE);
/* Forward problems are solved over the inverted-CFG postorder, so their
   block count is N_BLOCKS_INVERTED; backward problems use the regular
   postorder count.  */
1177 if (dir == DF_FORWARD)
1179 gcc_assert (df->postorder_inverted);
1180 return df->n_blocks_inverted;
1183 gcc_assert (df->postorder);
1184 return df->n_blocks;
1188 /* Return a pointer to the array of basic blocks in the reverse postorder.
1189 Depending on the direction of the dataflow problem,
1190 it returns either the usual reverse postorder array
1191 or the reverse postorder of inverted traversal. */
1193 df_get_postorder (enum df_flow_dir dir)
1195 gcc_assert (dir != DF_NONE);
/* Mirrors df_get_n_blocks: DF_FORWARD pairs with POSTORDER_INVERTED,
   everything else with POSTORDER.  */
1197 if (dir == DF_FORWARD)
1199 gcc_assert (df->postorder_inverted);
1200 return df->postorder_inverted;
1202 gcc_assert (df->postorder);
1203 return df->postorder;
/* File-static scratch problem/dataflow instances filled in on each call to
   df_simple_dataflow below.  NOTE(review): because these are shared static
   buffers, df_simple_dataflow is not reentrant.  */
1206 static struct df_problem user_problem;
1207 static struct dataflow user_dflow;
1209 /* Interface for calling iterative dataflow with user defined
1210 confluence and transfer functions. All that is necessary is to
1211 supply DIR, a direction, CONF_FUN_0, a confluence function for
1212 blocks with no logical preds (or NULL), CONF_FUN_N, the normal
1213 confluence function, TRANS_FUN, the basic block transfer function,
1214 and BLOCKS, the set of blocks to examine, POSTORDER the blocks in
1215 postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1218 df_simple_dataflow (enum df_flow_dir dir,
1219 df_init_function init_fun,
1220 df_confluence_function_0 con_fun_0,
1221 df_confluence_function_n con_fun_n,
1222 df_transfer_function trans_fun,
1223 bitmap blocks, int * postorder, int n_blocks)
/* Build an ad-hoc df_problem from the user's hooks and hand it to the
   generic worklist solver.  */
1225 memset (&user_problem, 0, sizeof (struct df_problem));
1226 user_problem.dir = dir;
1227 user_problem.init_fun = init_fun;
1228 user_problem.con_fun_0 = con_fun_0;
1229 user_problem.con_fun_n = con_fun_n;
1230 user_problem.trans_fun = trans_fun;
1231 user_dflow.problem = &user_problem;
1232 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1237 /*----------------------------------------------------------------------------
1238 Functions to support limited incremental change.
1239 ----------------------------------------------------------------------------*/
1242 /* Get basic block info. */
1245 df_get_bb_info (struct dataflow *dflow, unsigned int index)
/* Out-of-range or unallocated block_info is handled before the access.  */
1247 if (dflow->block_info == NULL)
1249 if (index >= dflow->block_info_size)
/* NOTE(review): the cast to df_scan_bb_info * looks stale — BLOCK_INFO
   holds problem-specific data, not necessarily scan info; confirm the
   intended return type.  */
1251 return (struct df_scan_bb_info *) dflow->block_info[index];
1255 /* Set basic block info.  Stores BB_INFO as problem DFLOW's per-block
   data for block INDEX; BLOCK_INFO must already be allocated.  */
1258 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1261 gcc_assert (dflow->block_info);
1262 dflow->block_info[index] = bb_info;
1266 /* Mark the solutions as being out of date. */
1269 df_mark_solutions_dirty (void)
/* Start at 1 to skip the DF_SCAN problem, which has no solution.  */
1274 for (p = 1; p < df->num_problems_defined; p++)
1275 df->problems_in_order[p]->solutions_dirty = true;
1280 /* Return true if BB needs its transfer functions recomputed.  Only the
   DF_LIVE dirty bitmap is consulted here.  */
1283 df_get_bb_dirty (basic_block bb)
1286 return bitmap_bit_p (df_live->out_of_date_transfer_functions, bb->index);
1292 /* Mark BB as needing its transfer functions recomputed (they are out of
1296 df_set_bb_dirty (basic_block bb)
/* Set BB's bit in every problem that tracks per-block dirtiness (skip
   DF_SCAN at index 0), then invalidate all solutions.  */
1301 for (p = 1; p < df->num_problems_defined; p++)
1303 struct dataflow *dflow = df->problems_in_order[p];
1304 if (dflow->out_of_date_transfer_functions)
1305 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1307 df_mark_solutions_dirty ();
1312 /* Clear the dirty bits. This is called from places that delete
1315 df_clear_bb_dirty (basic_block bb)
/* Inverse of df_set_bb_dirty: clear BB's bit in every problem that keeps
   a dirty bitmap.  Does not touch solutions_dirty.  */
1318 for (p = 1; p < df->num_problems_defined; p++)
1320 struct dataflow *dflow = df->problems_in_order[p];
1321 if (dflow->out_of_date_transfer_functions)
1322 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1325 /* Called from the rtl_compact_blocks to reorganize the problems basic
1329 df_compact_blocks (void)
1333 void **problem_temps;
1334 int size = last_basic_block * sizeof (void *);
1335 bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
1336 problem_temps = xmalloc (size);
/* Pass 1: for every problem, remap its per-block state (dirty bitmaps
   and block_info) from old block indices to the compacted ones.  */
1338 for (p = 0; p < df->num_problems_defined; p++)
1340 struct dataflow *dflow = df->problems_in_order[p];
1342 /* Need to reorganize the out_of_date_transfer_functions for the
1344 if (dflow->out_of_date_transfer_functions)
1346 bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1347 bitmap_clear (dflow->out_of_date_transfer_functions);
/* ENTRY/EXIT have fixed indices and keep their bits in place.  */
1348 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1349 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1350 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1351 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1353 i = NUM_FIXED_BLOCKS;
1356 if (bitmap_bit_p (tmp, bb->index))
1357 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1362 /* Now shuffle the block info for the problem. */
1363 if (dflow->problem->free_bb_fun)
1365 df_grow_bb_info (dflow);
1366 memcpy (problem_temps, dflow->block_info, size);
1368 /* Copy the bb info from the problem tmps to the proper
1369 place in the block_info vector. Null out the copied
1370 item. The entry and exit blocks never move. */
1371 i = NUM_FIXED_BLOCKS;
1374 df_set_bb_info (dflow, i, problem_temps[bb->index]);
1375 problem_temps[bb->index] = NULL;
/* Clear any tail slots beyond the compacted blocks.  */
1378 memset (dflow->block_info + i, 0,
1379 (last_basic_block - i) *sizeof (void *));
1381 /* Free any block infos that were not copied (and NULLed).
1382 These are from orphaned blocks. */
1383 for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
1385 basic_block bb = BASIC_BLOCK (i);
1386 if (problem_temps[i] && bb)
1387 dflow->problem->free_bb_fun
1388 (bb, problem_temps[i]);
1393 /* Shuffle the bits in the basic_block indexed arrays. */
1395 if (df->blocks_to_analyze)
/* NOTE(review): at this point TMP still holds the last problem's dirty
   bits from the loop above, yet it is consulted for ENTRY/EXIT before
   being overwritten with blocks_to_analyze — verify this ordering is
   intentional.  */
1397 if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1398 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1399 if (bitmap_bit_p (tmp, EXIT_BLOCK))
1400 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1401 bitmap_copy (tmp, df->blocks_to_analyze);
1402 bitmap_clear (df->blocks_to_analyze);
1403 i = NUM_FIXED_BLOCKS;
1406 if (bitmap_bit_p (tmp, bb->index))
1407 bitmap_set_bit (df->blocks_to_analyze, i);
1414 free (problem_temps);
/* Pass 2: compact the BASIC_BLOCK array itself and NULL the tail.  */
1416 i = NUM_FIXED_BLOCKS;
1419 SET_BASIC_BLOCK (i, bb);
1424 gcc_assert (i == n_basic_blocks);
1426 for (; i < last_basic_block; i++)
1427 SET_BASIC_BLOCK (i, NULL);
1430 if (!df_lr->solutions_dirty)
1431 df_set_clean_cfg ();
1436 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1437 block. There is no excuse for people to do this kind of thing. */
1440 df_bb_replace (int old_index, basic_block new_block)
1442 int new_block_index = new_block->index;
1446 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
/* The target slot must be empty — we move, never overwrite.  */
1449 gcc_assert (BASIC_BLOCK (old_index) == NULL);
/* Move every problem's per-block info from the block's old slot to the
   new one.  */
1451 for (p = 0; p < df->num_problems_defined; p++)
1453 struct dataflow *dflow = df->problems_in_order[p];
1454 if (dflow->block_info)
1456 df_grow_bb_info (dflow);
1457 gcc_assert (df_get_bb_info (dflow, old_index) == NULL);
1458 df_set_bb_info (dflow, old_index,
1459 df_get_bb_info (dflow, new_block_index));
/* Re-index the block and mark the moved block dirty so its transfer
   functions get recomputed; the old slot is vacated.  */
1463 df_clear_bb_dirty (new_block);
1464 SET_BASIC_BLOCK (old_index, new_block);
1465 new_block->index = old_index;
1466 df_set_bb_dirty (BASIC_BLOCK (old_index));
1467 SET_BASIC_BLOCK (new_block_index, NULL);
1471 /* Free all of the per basic block dataflow from all of the problems.
1472 This is typically called before a basic block is deleted and the
1473 problem will be reanalyzed. */
1476 df_bb_delete (int bb_index)
1478 basic_block bb = BASIC_BLOCK (bb_index);
/* Let each problem release its block info via its own free hook, then
   NULL the slot so nothing dangles.  */
1484 for (i = 0; i < df->num_problems_defined; i++)
1486 struct dataflow *dflow = df->problems_in_order[i];
1487 if (dflow->problem->free_bb_fun)
1489 void *bb_info = df_get_bb_info (dflow, bb_index);
1492 dflow->problem->free_bb_fun (bb, bb_info);
1493 df_set_bb_info (dflow, bb_index, NULL);
1497 df_clear_bb_dirty (bb);
1498 df_mark_solutions_dirty ();
1502 /* Verify that there is a place for everything and everything is in
1503 its place. This is too expensive to run after every pass in the
1504 mainline. However this is an excellent debugging tool if the
1505 dataflow information is not being updated properly. You can just
1506 sprinkle calls in until you find the place that is changing an
1507 underlying structure without calling the proper updating
1514 df_lr_verify_transfer_functions ();
1516 df_live_verify_transfer_functions ();
1521 /* Compute an array of ints that describes the cfg. This can be used
1522 to discover places where the cfg is modified by the appropriate
1523 calls have not been made to the keep df informed. The internals of
1524 this are unexciting, the key is that two instances of this can be
1525 compared to see if any changes have been made to the cfg. */
1528 df_compute_cfg_image (void)
/* Image size: a fixed header plus two slots per block plus one slot per
   successor edge.  */
1531 int size = 2 + (2 * n_basic_blocks);
1537 size += EDGE_COUNT (bb->succs);
1540 map = XNEWVEC (int, size);
/* Serialize each block index followed by its successor indices.  */
1548 map[i++] = bb->index;
1549 FOR_EACH_EDGE (e, ei, bb->succs)
1550 map[i++] = e->dest->index;
/* Fingerprint of the CFG saved by df_set_clean_cfg; NULL until the first
   snapshot is taken.  */
1557 static int *saved_cfg = NULL;
1560 /* This function compares the saved version of the cfg with the
1561 current cfg and aborts if the two are identical. The function
1562 silently returns if the cfg has been marked as dirty or the two are
1566 df_check_cfg_clean (void)
/* Nothing to check while solutions are dirty or before any snapshot.  */
1573 if (df_lr->solutions_dirty)
1576 if (saved_cfg == NULL)
1579 new_map = df_compute_cfg_image ();
/* SAVED_CFG[0] records the image length (in ints) for the memcmp.  */
1580 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1585 /* This function builds a cfg fingerprint and squirrels it away in
1589 df_set_clean_cfg (void)
/* Replace any previous snapshot with a fresh image of the current CFG.  */
1593 saved_cfg = df_compute_cfg_image ();
1596 #endif /* DF_DEBUG_CFG */
1597 /*----------------------------------------------------------------------------
1598 PUBLIC INTERFACES TO QUERY INFORMATION.
1599 ----------------------------------------------------------------------------*/
1602 /* Return last use of REGNO within BB. */
1605 df_bb_regno_last_use_find (basic_block bb, unsigned int regno)
1608 struct df_ref **use_rec;
/* Walk insns backwards so the first match is the last use in BB.  */
1611 FOR_BB_INSNS_REVERSE (bb, insn)
1616 uid = INSN_UID (insn);
1617 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
1619 struct df_ref *use = *use_rec;
1620 if (DF_REF_REGNO (use) == regno)
/* Uses recorded in REG_EQUAL/REG_EQUIV notes are only considered when
   the DF_EQ_NOTES flag is enabled.  */
1624 if (df->changeable_flags & DF_EQ_NOTES)
1625 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
1627 struct df_ref *use = *use_rec;
1628 if (DF_REF_REGNO (use) == regno)
1636 /* Return first def of REGNO within BB. */
1639 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1642 struct df_ref **def_rec;
/* Forward walk: the first matching def encountered is the answer.  */
1645 FOR_BB_INSNS (bb, insn)
1650 uid = INSN_UID (insn);
1651 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1653 struct df_ref *def = *def_rec;
1654 if (DF_REF_REGNO (def) == regno)
1662 /* Return last def of REGNO within BB. */
1665 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1668 struct df_ref **def_rec;
/* Backward walk: the first matching def encountered is the last in BB.  */
1671 FOR_BB_INSNS_REVERSE (bb, insn)
1676 uid = INSN_UID (insn);
1677 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1679 struct df_ref *def = *def_rec;
1680 if (DF_REF_REGNO (def) == regno)
1688 /* Return true if INSN defines REGNO. */
1691 df_insn_regno_def_p (rtx insn, unsigned int regno)
1694 struct df_ref **def_rec;
/* Scan the null-terminated def record array for INSN's uid.  */
1696 uid = INSN_UID (insn);
1697 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1699 struct df_ref *def = *def_rec;
1700 if (DF_REF_REGNO (def) == regno)
1708 /* Finds the reference corresponding to the definition of REG in INSN.
1709 DF is the dataflow object. */
1712 df_find_def (rtx insn, rtx reg)
1715 struct df_ref **def_rec;
/* Strip a SUBREG wrapper; comparisons are against the underlying REG.  */
1717 if (GET_CODE (reg) == SUBREG)
1718 reg = SUBREG_REG (reg);
1719 gcc_assert (REG_P (reg));
1721 uid = INSN_UID (insn);
1722 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1724 struct df_ref *def = *def_rec;
1725 if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
1733 /* Return true if REG is defined in INSN, zero otherwise.  Thin
   predicate wrapper over df_find_def.  */
1736 df_reg_defined (rtx insn, rtx reg)
1738 return df_find_def (insn, reg) != NULL;
1742 /* Finds the reference corresponding to the use of REG in INSN.
1743 DF is the dataflow object. */
1746 df_find_use (rtx insn, rtx reg)
1749 struct df_ref **use_rec;
/* Strip a SUBREG wrapper; comparisons are against the underlying REG.  */
1751 if (GET_CODE (reg) == SUBREG)
1752 reg = SUBREG_REG (reg);
1753 gcc_assert (REG_P (reg));
1755 uid = INSN_UID (insn);
1756 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
1758 struct df_ref *use = *use_rec;
1759 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
/* Also search REG_EQUAL/REG_EQUIV note uses when DF_EQ_NOTES is set.  */
1762 if (df->changeable_flags & DF_EQ_NOTES)
1763 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
1765 struct df_ref *use = *use_rec;
1766 if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
1773 /* Return true if REG is referenced in INSN, zero otherwise.  Thin
   predicate wrapper over df_find_use.  */
1776 df_reg_used (rtx insn, rtx reg)
1778 return df_find_use (insn, reg) != NULL;
1782 /*----------------------------------------------------------------------------
1783 Debugging and printing functions.
1784 ----------------------------------------------------------------------------*/
1787 /* Write information about registers and basic blocks into FILE.
1788 This is part of making a debugging dump. */
1791 df_print_regset (FILE *file, bitmap r)
1797 fputs (" (nil)", file);
/* Print each regno; hard registers additionally get their name.  */
1800 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
1802 fprintf (file, " %d", i);
1803 if (i < FIRST_PSEUDO_REGISTER)
1804 fprintf (file, " [%s]", reg_names[i]);
1807 fprintf (file, "\n");
1811 /* Dump dataflow info.  Emits the per-problem header followed by the
   top and bottom block info for each block.  */
1813 df_dump (FILE *file)
1816 df_dump_start (file);
1820 df_print_bb_index (bb, file);
1821 df_dump_top (bb, file);
1822 df_dump_bottom (bb, file);
1825 fprintf (file, "\n");
1829 /* Dump the introductory information for each problem defined. */
1832 df_dump_start (FILE *file)
1839 fprintf (file, "\n\n%s\n", current_function_name ());
1840 fprintf (file, "\nDataflow summary:\n");
1841 if (df->blocks_to_analyze)
1842 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
1843 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
/* Let each problem with a computed solution print its own header.  */
1845 for (i = 0; i < df->num_problems_defined; i++)
1847 struct dataflow *dflow = df->problems_in_order[i];
1848 if (dflow->computed)
1850 df_dump_problem_function fun = dflow->problem->dump_start_fun;
1858 /* Dump the top of the block information for BB. */
1861 df_dump_top (basic_block bb, FILE *file)
/* Dispatch to each computed problem's dump_top hook.  */
1868 for (i = 0; i < df->num_problems_defined; i++)
1870 struct dataflow *dflow = df->problems_in_order[i];
1871 if (dflow->computed)
1873 df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
1881 /* Dump the bottom of the block information for BB. */
1884 df_dump_bottom (basic_block bb, FILE *file)
/* Dispatch to each computed problem's dump_bottom hook.  */
1891 for (i = 0; i < df->num_problems_defined; i++)
1893 struct dataflow *dflow = df->problems_in_order[i];
1894 if (dflow->computed)
1896 df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
/* Dump the null-terminated array of refs REF_REC as "{ ... }"; each ref
   prints as d/u/e (def, use, or note use) with its id and regno.  When
   FOLLOW_CHAIN, also dump each ref's def-use/use-def chain.  */
1905 df_refs_chain_dump (struct df_ref **ref_rec, bool follow_chain, FILE *file)
1907 fprintf (file, "{ ");
1910 struct df_ref *ref = *ref_rec;
1911 fprintf (file, "%c%d(%d)",
1912 DF_REF_REG_DEF_P (ref) ? 'd' : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
1914 DF_REF_REGNO (ref));
1916 df_chain_dump (DF_REF_CHAIN (ref), file);
1919 fprintf (file, "}");
1923 /* Dump either a ref-def or reg-use chain. */
1926 df_regs_chain_dump (struct df_ref *ref, FILE *file)
1928 fprintf (file, "{ ");
/* Walk the per-register ref list via next_reg.  */
1931 fprintf (file, "%c%d(%d) ",
1932 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
1934 DF_REF_REGNO (ref));
1935 ref = ref->next_reg;
1937 fprintf (file, "}");
/* Dump the null-terminated array MWS of multiword-hardreg records: one
   "mw d/u r[start..end]" line per entry.  */
1942 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
1946 fprintf (file, "mw %c r[%d..%d]\n",
1947 ((*mws)->type == DF_REF_REG_DEF) ? 'd' : 'u',
1948 (*mws)->start_regno, (*mws)->end_regno);
/* Dump everything df knows about insn UID: luid, defs, uses, eq-note
   uses, and multiword hardreg records.  FOLLOW_CHAIN is forwarded to the
   ref dumpers.  */
1955 df_insn_uid_debug (unsigned int uid,
1956 bool follow_chain, FILE *file)
1958 fprintf (file, "insn %d luid %d",
1959 uid, DF_INSN_UID_LUID (uid));
1961 if (DF_INSN_UID_DEFS (uid))
1963 fprintf (file, " defs ");
1964 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
1967 if (DF_INSN_UID_USES (uid))
1969 fprintf (file, " uses ");
1970 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
1973 if (DF_INSN_UID_EQ_USES (uid))
1975 fprintf (file, " eq uses ");
1976 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
1979 if (DF_INSN_UID_MWS (uid))
1981 fprintf (file, " mws ");
1982 df_mws_dump (DF_INSN_UID_MWS (uid), file);
1984 fprintf (file, "\n");
/* Convenience wrapper: dump df info for INSN by its uid.  */
1989 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
1991 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
/* Dump INSN's defs, uses, and eq-note uses on one line, without
   following chains.  Requires INSN to be inside a basic block.  */
1995 df_insn_debug_regno (rtx insn, FILE *file)
1997 unsigned int uid = INSN_UID(insn);
1999 fprintf (file, "insn %d bb %d luid %d defs ",
2000 uid, BLOCK_FOR_INSN (insn)->index, DF_INSN_LUID (insn));
2001 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), false, file);
2003 fprintf (file, " uses ");
2004 df_refs_chain_dump (DF_INSN_UID_USES (uid), false, file);
2006 fprintf (file, " eq_uses ");
2007 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), false, file);
2008 fprintf (file, "\n");
/* Dump the def, use, and eq-use chains recorded for register REGNO.  */
2012 df_regno_debug (unsigned int regno, FILE *file)
2014 fprintf (file, "reg %d defs ", regno);
2015 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2016 fprintf (file, " uses ");
2017 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2018 fprintf (file, " eq_uses ");
2019 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2020 fprintf (file, "\n");
/* Dump a single df_ref REF: kind (d/u), regno, containing bb/insn, flags,
   type, location, and its chain.  An insn uid of -1 means the ref is not
   attached to an insn (an artificial ref).  */
2025 df_ref_debug (struct df_ref *ref, FILE *file)
2027 fprintf (file, "%c%d ",
2028 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2030 fprintf (file, "reg %d bb %d insn %d flag 0x%x type 0x%x ",
2033 DF_REF_INSN (ref) ? INSN_UID (DF_REF_INSN (ref)) : -1,
2036 if (DF_REF_LOC (ref))
2037 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref), (void *)*DF_REF_LOC (ref));
2039 fprintf (file, "chain ");
2040 df_chain_dump (DF_REF_CHAIN (ref), file);
2041 fprintf (file, "\n");
2044 /* Functions for debugging from GDB.  Each is a tiny wrapper that dumps
   to stderr so it can be called interactively from the debugger.  */
2047 debug_df_insn (rtx insn)
2049 df_insn_debug (insn, true, stderr);
2055 debug_df_reg (rtx reg)
2057 df_regno_debug (REGNO (reg), stderr);
2062 debug_df_regno (unsigned int regno)
2064 df_regno_debug (regno, stderr);
2069 debug_df_ref (struct df_ref *ref)
2071 df_ref_debug (ref, stderr);
2076 debug_df_defno (unsigned int defno)
2078 df_ref_debug (DF_DEFS_GET (defno), stderr);
/* NOTE(review): the parameter here is a USE index despite being named
   DEFNO — consider renaming for clarity.  */
2083 debug_df_useno (unsigned int defno)
2085 df_ref_debug (DF_USES_GET (defno), stderr);
2090 debug_df_chain (struct df_link *link)
2092 df_chain_dump (link, stderr);
2093 fputc ('\n', stderr);