/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops; see the example
     below.
*/
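
/* As an illustration of the last TODO item above (a sketch, not taken from
   the original sources), consider:

       (set (mem:SI (reg:SI 100)) (reg:SI 101))   ;; store r101 to [r100]
       ...
       (set (reg:SI 101) (mem:SI (reg:SI 100)))   ;; reload [r100] into r101

   The load reads back exactly the register that was stored, so treating
   the store as killing the load is overly conservative and blocks the
   load motion described above.  */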

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   F. Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"

/* Propagate flow information through back edges and thus enable PRE to
   move loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
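
/* A sketch of the kind of opportunity this exposes (illustrative C, not
   taken from the original sources):

       while (p)                          tmp = a + b;
         {                                while (p)
           x = a + b;            =>         {
           p = f (p, x);                      x = tmp;
         }                                    p = f (p, x);
                                            }

   Once flow information is propagated around the back edge, the
   computation of a + b inside the loop is partially redundant with
   itself, so PRE can hoist it ahead of the loop.  */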

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an earlier condition.

   5) Perform store motion.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).
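
   As a small illustration of why the second pass pays off (a sketch, not
   taken from the original sources): if PRE rewrites

       x = a + b;          t = a + b;  x = t;
       ...          =>     ...
       y = a + b;          y = t;

   the copy "x = t" often lets later uses of x be replaced by t, and it is
   the second copy/constant propagation pass that performs that cleanup.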

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).
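
   For instance (an illustrative sketch, not taken from the original
   sources), a PIC-style address may be built by two insns

       (set (reg:SI 200) (high:SI (symbol_ref "var")))
       (set (reg:SI 201) (lo_sum:SI (reg:SI 200) (symbol_ref "var")))

   where the second insn carries a REG_EQUAL note for (symbol_ref "var");
   recording the note value lets PRE treat the whole address computation
   as one expression.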

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
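
   For example (an illustrative sketch, not taken from the original sources):

       if (c)                        if (c)
         x = a + b;                    { t = a + b; x = t; }
       else                  =>      else
         f ();                         { f (); t = a + b; }
       y = a + b;                    y = t;

   The computation feeding y is partially redundant; it is deleted, and the
   copy of a + b inserted on the path through f () makes it fully redundant.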

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */

/* GCSE global vars.  */

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use { rtx reg_rtx; };

/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  unsigned int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same bit.  */
  char copied_p;
};

/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether the table is the expression one or the copy propagation one.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table set_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself.  (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance.  (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be movable towards the exit block.  */
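
/* For instance (an illustrative sketch, not taken from the original
   sources), if the only references to a location are

       (set (mem:SI (reg:SI 300)) (reg:SI 301))    ;; store
       ...
       (set (reg:SI 302) (mem:SI (reg:SI 300)))    ;; load

   the MEM is killed by nothing except itself, so the load can be satisfied
   from a register holding the stored value, and the stores can be moved
   towards the exit block.  */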

struct ls_expr
{
  struct expr * expr;           /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx pattern_regs;             /* List of registers mentioned by the mem.  */
  rtx loads;                    /* INSN list of loads seen.  */
  rtx stores;                   /* INSN list of stores seen.  */
  struct ls_expr * next;        /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  unsigned int hash_index;      /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};

/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number, for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* For available exprs.  */
static sbitmap *ae_kill, *ae_gen;

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table *);
static void hash_scan_set (rtx, rtx, struct hash_table *);
static void hash_scan_clobber (rtx, rtx, struct hash_table *);
static void hash_scan_call (rtx, rtx, struct hash_table *);
static int want_to_gcse_p (rtx);
static bool can_assign_to_reg_p (rtx);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
                                  struct hash_table *);
static void insert_set_in_table (rtx, rtx, struct hash_table *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table *);
static void alloc_hash_table (int, struct hash_table *, int);
static void free_hash_table (struct hash_table *);
static void compute_hash_table_work (struct hash_table *);
static void dump_hash_table (FILE *, const char *, struct hash_table *);
static struct expr *lookup_set (unsigned int, struct hash_table *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_transpout (void);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
                                      struct hash_table *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
                                    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block, int);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
                                         basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int enumerate_ldsts (void);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void reg_set_info (rtx, const_rtx, void *);
static void reg_clear_last_set (rtx, const_rtx, void *);
static bool store_ops_ok (const_rtx, int *);
static rtx extract_mentioned_regs (rtx);
static rtx extract_mentioned_regs_helper (rtx, rtx);
static void find_moveable_store (rtx, int *, int *);
static int compute_store_table (void);
static bool load_kills_store (const_rtx, const_rtx, int);
static bool find_loads (const_rtx, const_rtx, int);
static bool store_killed_in_insn (const_rtx, const_rtx, const_rtx, int);
static bool store_killed_after (const_rtx, const_rtx, const_rtx,
                                const_basic_block, int *, rtx *);
static bool store_killed_before (const_rtx, const_rtx, const_rtx,
                                 const_basic_block, int *);
static void build_store_vectors (void);
static void insert_insn_start_basic_block (rtx, basic_block);
static int insert_store (struct ls_expr *, edge);
static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
static void delete_store (struct ls_expr *, basic_block);
static void free_store_memory (void);
static int one_store_motion_pass (void);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);

#define GNEW(T)              ((T *) gmalloc (sizeof (T)))
#define GCNEW(T)             ((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)        ((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)       ((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)        ((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)       ((T *) gcalloc (1, (S)))

#define GOBNEW(T)            ((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)      ((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

/* Nonzero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy[(int) NUM_MACHINE_MODES];

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif

  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy[i] = 0;
#else
        reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;
  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  static bool can_copy_init_p = false;

  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_ALLOC (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.
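
   For instance (an illustrative sketch, not taken from the original
   sources), in a block consisting of

       a = 1;
       x = a + b;
       b = 2;

   the expression a + b is locally anticipatable (its operands are not
   modified before the computation), not locally available (b is modified
   after the computation), and not transparent (an operand is modified
   within the block).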

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
                          struct hash_table *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
        sbitmap_vector_zero (transp, last_basic_block);
      else
        sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp, table->set_p);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to nonzero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      return can_assign_to_reg_p (x);
    }
}

/* Used internally by can_assign_to_reg_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register.  */

static bool
can_assign_to_reg_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
          && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < DF_INSN_LUID (insn);
        else
          return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
                                  x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static const_rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                          void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
        gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
                       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x, int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
          || (! avail_p
              && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
        {
          list_entry = XEXP (list_entry, 1);
          continue;
        }

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (CALL_P (setter))
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.

         The note_stores interface is pretty limited, so we have to
         communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
        return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
           int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
                   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
                      int avail_p, struct hash_table *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
        antic_occr = NULL;

      if (antic_occr)
        /* Found another instance of the expression in the same basic block.
           Prefer the currently recorded one.  We want the first one in the
           block and the block is scanned from start to end.  */
        ; /* nothing to do */
      else
        {
          /* First occurrence of this expression in this basic block.  */
          antic_occr = GOBNEW (struct occr);
          bytes_used += sizeof (struct occr);
          antic_occr->insn = insn;
          antic_occr->next = cur_expr->antic_occr;
          antic_occr->deleted_p = 0;
          cur_expr->antic_occr = antic_occr;
        }
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
        {
          /* Found another instance of the expression in the same basic block.
             Prefer this occurrence to the currently recorded one.  We want
             the last one in the block and the block is scanned from start
             to end.  */
          avail_occr->insn = insn;
        }
      else
        {
          /* First occurrence of this expression in this basic block.  */
          avail_occr = GOBNEW (struct occr);
          bytes_used += sizeof (struct occr);
          avail_occr->insn = insn;
          avail_occr->next = cur_expr->avail_occr;
          avail_occr->deleted_p = 0;
          cur_expr->avail_occr = avail_occr;
        }
    }
}

/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
         We must copy X because it can be modified when copy propagation is
         performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
         Prefer this occurrence to the currently recorded one.  We want
         the last one in the block and the block is scanned from start
         to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}

/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && GET_CODE (XEXP (x, 0)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return true;

  /* Consider a COMPARE of the same registers constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}
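
/* For example (illustrative), (const_int 42) and a COMPARE of the same
   non-floating register with itself both count as constants here, while
   a COMPARE of two different registers does not.  */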

/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

         This allows us to do a single GCSE pass and still eliminate
         redundant constants, addresses or other expressions that are
         constructed with multiple instructions.

         However, keep the original SRC if INSN is a simple reg-reg move.
         In this case, there will almost always be a REG_EQUAL note on the
         insn that sets SRC.  By recording the REG_EQUAL value here as SRC
         for INSN, we miss copy propagation opportunities and we perform the
         same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
         do more than one PRE GCSE pass.

         Note that this does not impede profitable constant propagations.  We
         "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
          && REG_NOTE_KIND (note) == REG_EQUAL
          && ! REG_P (src)
          && (table->set_p
              ? gcse_constant_p (XEXP (note, 0))
              : want_to_gcse_p (XEXP (note, 0))))
        src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
          && regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (dest))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH_REGION notes so disable GCSE on these
             for now.  */
          && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
          /* Is SET_SRC something we want to gcse?  */
          && want_to_gcse_p (src)
          /* Don't CSE a nop.  */
          && ! set_noop_p (pat)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point, only function parameters should have REG_EQUIV
             notes, and if the argument slot is used somewhere explicitly,
             it means the address of the parameter has been taken, so we
             should not extend the lifetime of the pseudo.  */
          && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
        {
          /* An expression is not anticipatable if its operands are
             modified before this insn or if this is not the only SET in
             this insn.  The latter condition does not have to mean that
             SRC itself is not anticipatable, but we just will not be
             able to handle code motion of insns with multiple sets.  */
          int antic_p = oprs_anticipatable_p (src, insn)
                        && !multiple_sets (insn);
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = (oprs_available_p (src, insn)
                         && ! JUMP_P (insn));

          insert_expr_in_table (src, GET_MODE (dest), insn, antic_p,
                                avail_p, table);
        }

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
               && regno >= FIRST_PSEUDO_REGISTER
               && ((REG_P (src)
                    && REGNO (src) >= FIRST_PSEUDO_REGISTER
                    && can_copy_p (GET_MODE (dest))
                    && REGNO (src) != regno)
                   || gcse_constant_p (src))
               /* A copy is not available if its src or dest is subsequently
                  modified.  Here we want to search from INSN+1 on, but
                  oprs_available_p searches from INSN on.  */
               && (insn == BB_END (BLOCK_FOR_INSN (insn))
                   || (tmp = next_nonnote_insn (insn)) == NULL_RTX
                   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
                   || oprs_available_p (pat, tmp)))
        insert_set_in_table (pat, insn, table);
    }

  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
          /* Only record sets of pseudo-regs in the hash table.  */
          && regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (src))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH_REGION notes so disable GCSE on these
             for now.  */
          && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
          /* Is SET_DEST something we want to gcse?  */
          && want_to_gcse_p (dest)
          /* Don't CSE a nop.  */
          && ! set_noop_p (pat)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point, only function parameters should have REG_EQUIV
             notes, and if the argument slot is used somewhere explicitly,
             it means the address of the parameter has been taken, so we
             should not extend the lifetime of the pseudo.  */
          && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
              || ! MEM_P (XEXP (note, 0))))
        {
          /* Stores are never anticipatable.  */
          int antic_p = 0;
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = oprs_available_p (dest, insn)
                        && ! JUMP_P (insn);

          /* Record the memory expression (DEST) in the hash table.  */
          insert_expr_in_table (dest, GET_MODE (dest), insn,
                                antic_p, avail_p, table);
        }
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
                   struct hash_table *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
                struct hash_table *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If TABLE->set_p is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.  */

static void
hash_scan_insn (rtx insn, struct hash_table *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx x = XVECEXP (pat, 0, i);

        if (GET_CODE (x) == SET)
          hash_scan_set (x, insn, table);
        else if (GET_CODE (x) == CLOBBER)
          hash_scan_clobber (x, insn, table);
        else if (GET_CODE (x) == CALL)
          hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
        flat_table[expr->bitmap_index] = expr;
        hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
           name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
        expr = flat_table[i];
        fprintf (file, "Index %d (hash value %d)\n  ",
                 expr->bitmap_index, hash_val[i]);
        print_rtl (file, expr->expr);
        fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
                   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_NUM (insn);

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
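
/* Thus if a block's stores are scanned in order S1, S2, the resulting
   list layout is (a sketch)

       canon_modify_mem_list[bb] = (dest2, addr2, dest1, addr1)

   and consumers such as compute_transp walk it two entries at a time.  */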

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_NUM (insn);

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
         will break after encountering a CALL_INSN.  So, there's no
         need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
        alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
         determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
        {
          if (! INSN_P (insn))
            continue;

          if (CALL_P (insn))
            for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
              if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
                record_last_reg_set_info (insn, regno);

          note_stores (PATTERN (insn), record_last_set_info, insn);
        }

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
          && implicit_sets[current_bb->index] != NULL_RTX)
        hash_scan_set (implicit_sets[current_bb->index],
                       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
        if (INSN_P (insn))
          hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   N_INSNS is the number of instructions in the function.
   It is used to determine the number of buckets to use.
   SET_P determines whether set or expression table will
   be created.  */

static void
alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
{
  int n;

  table->size = n_insns / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
  table->set_p = set_p;
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table *table)
{
  free (table->table);
}

/* Compute the hash TABLE for doing copy/const propagation or
   the expression hash table.  */

static void
compute_hash_table (struct hash_table *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}

/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}

/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
        free_EXPR_LIST_node (list);
      else
        free_INSN_LIST_node (list);
    }

  *listp = 0;
}

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}

/* Reset tables used to keep track of what's still available [since the
   start of the block].  */

static void
reset_opr_set_tables (void)
{
  /* Maintain a bitmap of which regs have been set since beginning of
     the block.  */
  CLEAR_REG_SET (reg_set_bitmap);

  /* Also keep a record of the last instruction to modify memory.
     For now this is very trivial, we only record whether any memory
     location has been modified.  */
  clear_modify_mem_tables ();
}

/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (const_rtx x, const_rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
                                  DF_INSN_LUID (insn), x, 0))
        return 0;
      else
        return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            return oprs_not_set_p (XEXP (x, i), insn);

          if (! oprs_not_set_p (XEXP (x, i), insn))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
            return 0;
    }

  return 1;
}

/* Mark things set by a CALL.  */

static void
mark_call (rtx insn)
{
  if (! RTL_CONST_OR_PURE_CALL_P (insn))
    record_last_mem_set_info (insn);
}

/* Mark things set by a SET.  */

static void
mark_set (rtx pat, rtx insn)
{
  rtx dest = SET_DEST (pat);

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  if (REG_P (dest))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
  else if (MEM_P (dest))
    record_last_mem_set_info (insn);

  if (GET_CODE (SET_SRC (pat)) == CALL)
    mark_call (insn);
}

/* Record things set by a CLOBBER.  */

static void
mark_clobber (rtx pat, rtx insn)
{
  rtx clob = XEXP (pat, 0);

  while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
    clob = XEXP (clob, 0);

  if (REG_P (clob))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
  else
    record_last_mem_set_info (insn);
}

/* Record things set by INSN.
   This data is used by oprs_not_set_p.  */

static void
mark_oprs_set (rtx insn)
{
  rtx pat = PATTERN (insn);
  int i;

  if (GET_CODE (pat) == SET)
    mark_set (pat, insn);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx x = XVECEXP (pat, 0, i);

        if (GET_CODE (x) == SET)
          mark_set (x, insn);
        else if (GET_CODE (x) == CLOBBER)
          mark_clobber (x, insn);
        else if (GET_CODE (x) == CALL)
          mark_call (insn);
      }

  else if (GET_CODE (pat) == CLOBBER)
    mark_clobber (pat, insn);
  else if (GET_CODE (pat) == CALL)
    mark_call (insn);
}
2001 /* Compute copy/constant propagation working variables. */
2003 /* Local properties of assignments. */
2004 static sbitmap *cprop_pavloc;
2005 static sbitmap *cprop_absaltered;
2007 /* Global properties of assignments (computed from the local properties). */
2008 static sbitmap *cprop_avin;
2009 static sbitmap *cprop_avout;
2011 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2012 basic blocks. N_SETS is the number of sets. */
2015 alloc_cprop_mem (int n_blocks, int n_sets)
2017 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2018 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2020 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2021 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2024 /* Free vars used by copy/const propagation. */
2027 free_cprop_mem (void)
2029 sbitmap_vector_free (cprop_pavloc);
2030 sbitmap_vector_free (cprop_absaltered);
2031 sbitmap_vector_free (cprop_avin);
2032 sbitmap_vector_free (cprop_avout);
2035 /* For each block, compute whether X is transparent. X is either an
2036 expression or an assignment [though we don't care which, for this context
2037 an assignment is treated as an expression]. For each block where an
2038 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2039 bit in BMAP. */
2042 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2048 /* repeat is used to turn tail-recursion into iteration since GCC
2049 can't do it when there's no return value. */
2055 code = GET_CODE (x);
2062 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2064 def = DF_REF_NEXT_REG (def))
2065 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2070 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2072 def = DF_REF_NEXT_REG (def))
2073 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2079 if (! MEM_READONLY_P (x))
2084 /* First handle all the blocks with calls. We don't need to
2085 do any list walking for them. */
2086 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2089 SET_BIT (bmap[bb_index], indx);
2091 RESET_BIT (bmap[bb_index], indx);
2094 /* Now iterate over the blocks which have memory modifications
2095 but which do not have any calls. */
2096 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2100 rtx list_entry = canon_modify_mem_list[bb_index];
2104 rtx dest, dest_addr;
2106 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2107 Examine each hunk of memory that is modified. */
2109 dest = XEXP (list_entry, 0);
2110 list_entry = XEXP (list_entry, 1);
2111 dest_addr = XEXP (list_entry, 0);
2113 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2114 x, NULL_RTX, rtx_addr_varies_p))
2117 SET_BIT (bmap[bb_index], indx);
2119 RESET_BIT (bmap[bb_index], indx);
2122 list_entry = XEXP (list_entry, 1);
2147 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2151 /* If we are about to do the last recursive call
2152 needed at this level, change it into iteration.
2153 This function is called enough to be worth it. */
2160 compute_transp (XEXP (x, i), indx, bmap, set_p);
2162 else if (fmt[i] == 'E')
2163 for (j = 0; j < XVECLEN (x, i); j++)
2164 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2168 /* Top level routine to do the dataflow analysis needed by copy/const
2169 propagation. */
2172 compute_cprop_data (void)
2174 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2175 compute_available (cprop_pavloc, cprop_absaltered,
2176 cprop_avout, cprop_avin);
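/* A sketch of the forward dataflow problem compute_available solves here,
   on a miniature standalone model (hypothetical: one word per block, one
   bit per assignment, an adjacency matrix for predecessors; PAVLOC plays
   cprop_pavloc and KILL plays cprop_absaltered).  The equations are

     AVIN[b]  = intersection of AVOUT[p] over predecessors p (empty at entry)
     AVOUT[b] = PAVLOC[b] | (AVIN[b] & ~KILL[b])

   iterated from an optimistic all-ones start to a fixed point.  */

#define MINI_NBB 8		/* hypothetical block count; block 0 is entry */

static void
mini_compute_available (const unsigned int *pavloc, const unsigned int *kill,
			unsigned int *avin, unsigned int *avout,
			const unsigned char pred[MINI_NBB][MINI_NBB])
{
  int b, p, changed = 1;

  for (b = 0; b < MINI_NBB; b++)
    avout[b] = ~0U;		/* optimistic initialization */

  while (changed)
    {
      changed = 0;
      for (b = 0; b < MINI_NBB; b++)
	{
	  unsigned int in = (b == 0) ? 0 : ~0U;
	  unsigned int out;

	  for (p = 0; p < MINI_NBB; p++)
	    if (pred[b][p])
	      in &= avout[p];
	  avin[b] = in;
	  out = pavloc[b] | (in & ~kill[b]);
	  if (out != avout[b])
	    {
	      avout[b] = out;
	      changed = 1;
	    }
	}
    }
}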
2179 /* Copy/constant propagation. */
2181 /* Maximum number of register uses in an insn that we handle. */
2184 /* Table of uses found in an insn.
2185 Allocated statically to avoid alloc/free complexity and overhead. */
2186 static struct reg_use reg_use_table[MAX_USES];
2188 /* Index into `reg_use_table' while building it. */
2189 static int reg_use_count;
2191 /* Set up a list of register numbers used in INSN. The found uses are stored
2192 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2193 and contains the number of uses in the table upon exit.
2195 ??? If a register appears multiple times we will record it multiple times.
2196 This doesn't hurt anything but it will slow things down. */
2199 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2206 /* repeat is used to turn tail-recursion into iteration since GCC
2207 can't do it when there's no return value. */
2212 code = GET_CODE (x);
2215 if (reg_use_count == MAX_USES)
2218 reg_use_table[reg_use_count].reg_rtx = x;
2222 /* Recursively scan the operands of this expression. */
2224 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2228 /* If we are about to do the last recursive call
2229 needed at this level, change it into iteration.
2230 This function is called enough to be worth it. */
2237 find_used_regs (&XEXP (x, i), data);
2239 else if (fmt[i] == 'E')
2240 for (j = 0; j < XVECLEN (x, i); j++)
2241 find_used_regs (&XVECEXP (x, i, j), data);
2245 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2246 Returns nonzero if successful. */
2249 try_replace_reg (rtx from, rtx to, rtx insn)
2251 rtx note = find_reg_equal_equiv_note (insn);
2254 rtx set = single_set (insn);
2256 /* Usually we substitute easy stuff, so we won't copy everything.
2257 We do, however, need to take care not to duplicate non-trivial CONST
2258 expressions. */
2261 validate_replace_src_group (from, to, insn);
2262 if (num_changes_pending () && apply_change_group ())
2265 /* Try to simplify SET_SRC if we have substituted a constant. */
2266 if (success && set && CONSTANT_P (to))
2268 src = simplify_rtx (SET_SRC (set));
2271 validate_change (insn, &SET_SRC (set), src, 0);
2274 /* If there is already a REG_EQUAL note, update the expression in it
2275 with our replacement. */
2276 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2277 set_unique_reg_note (insn, REG_EQUAL,
2278 simplify_replace_rtx (XEXP (note, 0), from,
2280 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2282 /* If above failed and this is a single set, try to simplify the source of
2283 the set given our substitution. We could perhaps try this for multiple
2284 SETs, but it probably won't buy us anything. */
2285 src = simplify_replace_rtx (SET_SRC (set), from, to);
2287 if (!rtx_equal_p (src, SET_SRC (set))
2288 && validate_change (insn, &SET_SRC (set), src, 0))
2291 /* If we've failed to do replacement, have a single SET, don't already
2292 have a note, and have no special SET, add a REG_EQUAL note to not
2293 lose information. */
2294 if (!success && note == 0 && set != 0
2295 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2296 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2297 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2300 /* A REG_EQUAL note may get simplified into a plain register.
2301 We don't allow that; remove the note. This case ought not to
2302 happen, because the previous code ought to have synthesized a
2303 reg-reg move, but be on the safe side. */
2304 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2305 remove_note (insn, note);
2310 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
2311 NULL if no such set is found. */
2313 static struct expr *
2314 find_avail_set (int regno, rtx insn)
2316 /* SET1 contains the last set found that can be returned to the caller for
2317 use in a substitution. */
2318 struct expr *set1 = 0;
2320 /* Loops are not possible here. To get a loop we would need two sets
2321 available at the start of the block containing INSN. i.e. we would
2322 need two sets like this available at the start of the block:
2324 (set (reg X) (reg Y))
2325 (set (reg Y) (reg X))
2327 This cannot happen since the set of (reg Y) would have killed the
2328 set of (reg X) making it unavailable at the start of this block. */
2332 struct expr *set = lookup_set (regno, &set_hash_table);
2334 /* Find a set that is available at the start of the block
2335 which contains INSN. */
2338 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
2340 set = next_set (regno, set);
2343 /* If no available set was found we've reached the end of the
2344 (possibly empty) copy chain. */
2348 gcc_assert (GET_CODE (set->expr) == SET);
2350 src = SET_SRC (set->expr);
2352 /* We know the set is available.
2353 Now check that SRC is ANTLOC (i.e. none of the source operands
2354 have changed since the start of the block).
2356 If the source operand changed, we may still use it for the next
2357 iteration of this loop, but we may not use it for substitutions. */
2359 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2362 /* If the source of the set is anything except a register, then
2363 we have reached the end of the copy chain. */
2367 /* Follow the copy chain, i.e. start another iteration of the loop
2368 and see if we have an available copy into SRC. */
2369 regno = REGNO (src);
2372 /* SET1 holds the last set that was available and anticipatable at
2373 INSN. */
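/* A sketch of the copy-chain walk performed above, on a hypothetical
   standalone model: with "r3 = r2" and "r2 = r1" both available, a query
   for r3 follows the chain down to r1.  Termination follows from the
   argument above, since a cycle would need both sets of a register pair
   to be available at once.  */

struct mini_copy
{
  unsigned int dest;		/* register set by this entry */
  int src_is_reg;		/* nonzero when the source is a register */
  unsigned int src;		/* source register when src_is_reg */
};

/* Return the index in CHAIN of the last available set reachable from
   REGNO, or -1.  AVAIL[i] models cprop_avin for entry I; N is the number
   of entries.  */

static int
mini_follow_copies (unsigned int regno, const struct mini_copy *chain,
		    const int *avail, int n)
{
  int found = -1;

  for (;;)
    {
      int i, hit = -1;

      for (i = 0; i < n; i++)
	if (avail[i] && chain[i].dest == regno)
	  {
	    hit = i;
	    break;
	  }
      if (hit < 0)
	break;			/* end of the (possibly empty) chain */
      found = hit;
      if (! chain[hit].src_is_reg)
	break;			/* a constant ends the chain */
      regno = chain[hit].src;	/* follow the copy */
    }
  return found;
}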
2377 /* Subroutine of cprop_insn that tries to propagate constants into
2378 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2379 it is the instruction that immediately precedes JUMP, and must be a
2380 single SET of a register. FROM is what we will try to replace,
2381 SRC is the constant we will try to substitute for it. Returns nonzero
2382 if a change was made. */
2385 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2387 rtx new_rtx, set_src, note_src;
2388 rtx set = pc_set (jump);
2389 rtx note = find_reg_equal_equiv_note (jump);
2393 note_src = XEXP (note, 0);
2394 if (GET_CODE (note_src) == EXPR_LIST)
2395 note_src = NULL_RTX;
2397 else note_src = NULL_RTX;
2399 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2400 set_src = note_src ? note_src : SET_SRC (set);
2402 /* First substitute the SETCC condition into the JUMP instruction,
2403 then substitute the given values into this expanded JUMP. */
2404 if (setcc != NULL_RTX
2405 && !modified_between_p (from, setcc, jump)
2406 && !modified_between_p (src, setcc, jump))
2409 rtx setcc_set = single_set (setcc);
2410 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2411 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2412 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2413 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2419 new_rtx = simplify_replace_rtx (set_src, from, src);
2421 /* If no simplification can be made, then try the next register. */
2422 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2425 /* If this is now a no-op, delete it; otherwise it must be a valid insn. */
2426 if (new_rtx == pc_rtx)
2430 /* Ensure the value computed inside the jump insn is equivalent
2431 to the one computed by setcc. */
2432 if (setcc && modified_in_p (new_rtx, setcc))
2434 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2436 /* When (some) constants are not valid in a comparison, and there
2437 are two registers to be replaced by constants before the entire
2438 comparison can be folded into a constant, we need to keep
2439 intermediate information in REG_EQUAL notes. For targets with
2440 separate compare insns, such notes are added by try_replace_reg.
2441 When we have a combined compare-and-branch instruction, however,
2442 we need to attach a note to the branch itself to make this
2443 optimization work. */
2445 if (!rtx_equal_p (new_rtx, note_src))
2446 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2450 /* Remove REG_EQUAL note after simplification. */
2452 remove_note (jump, note);
2456 /* Delete the cc0 setter. */
2457 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2458 delete_insn (setcc);
2461 global_const_prop_count++;
2462 if (dump_file != NULL)
2465 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2466 REGNO (from), INSN_UID (jump));
2467 print_rtl (dump_file, src);
2468 fprintf (dump_file, "\n");
2470 purge_dead_edges (bb);
2472 /* If a conditional jump has been changed into an unconditional jump, remove
2473 the jump and make the edge fallthru - this is always called in
2474 cfglayout mode. */
2475 if (new_rtx != pc_rtx && simplejump_p (jump))
2480 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2481 if (e->dest != EXIT_BLOCK_PTR
2482 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2484 e->flags |= EDGE_FALLTHRU;
2494 constprop_register (rtx insn, rtx from, rtx to)
2498 /* Check for reg or cc0 setting instructions followed by
2499 conditional branch instructions first. */
2500 if ((sset = single_set (insn)) != NULL
2502 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2504 rtx dest = SET_DEST (sset);
2505 if ((REG_P (dest) || CC0_P (dest))
2506 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2510 /* Handle normal insns next. */
2511 if (NONJUMP_INSN_P (insn)
2512 && try_replace_reg (from, to, insn))
2515 /* Try to propagate a CONST_INT into a conditional jump.
2516 We're pretty specific about what we will handle in this
2517 code, we can extend this as necessary over time.
2519 Right now the insn in question must look like
2520 (set (pc) (if_then_else ...)) */
2521 else if (any_condjump_p (insn) && onlyjump_p (insn))
2522 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2526 /* Perform constant and copy propagation on INSN.
2527 The result is nonzero if a change was made. */
2530 cprop_insn (rtx insn)
2532 struct reg_use *reg_used;
2540 note_uses (&PATTERN (insn), find_used_regs, NULL);
2542 note = find_reg_equal_equiv_note (insn);
2544 /* We may win even when propagating constants into notes. */
2546 find_used_regs (&XEXP (note, 0), NULL);
2548 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2549 reg_used++, reg_use_count--)
2551 unsigned int regno = REGNO (reg_used->reg_rtx);
2555 /* If the register has already been set in this block, there's
2556 nothing we can do. */
2557 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2560 /* Find an assignment that sets reg_used and is available
2561 at the start of the block. */
2562 set = find_avail_set (regno, insn);
2567 /* ??? We might be able to handle PARALLELs. Later. */
2568 gcc_assert (GET_CODE (pat) == SET);
2570 src = SET_SRC (pat);
2572 /* Constant propagation. */
2573 if (gcse_constant_p (src))
2575 if (constprop_register (insn, reg_used->reg_rtx, src))
2578 global_const_prop_count++;
2579 if (dump_file != NULL)
2581 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2582 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2583 print_rtl (dump_file, src);
2584 fprintf (dump_file, "\n");
2586 if (INSN_DELETED_P (insn))
2590 else if (REG_P (src)
2591 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2592 && REGNO (src) != regno)
2594 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2597 global_copy_prop_count++;
2598 if (dump_file != NULL)
2600 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2601 regno, INSN_UID (insn));
2602 fprintf (dump_file, " with reg %d\n", REGNO (src));
2605 /* The original insn setting reg_used may or may not now be
2606 deletable. We leave the deletion to flow. */
2607 /* FIXME: If it turns out that the insn isn't deletable,
2608 then we may have unnecessarily extended register lifetimes
2609 and made things worse. */
2617 /* Like find_used_regs, but avoid recording uses that appear in
2618 input-output contexts such as zero_extract or pre_dec. This
2619 restricts the cases we consider to those for which local cprop
2620 can legitimately make replacements. */
2623 local_cprop_find_used_regs (rtx *xptr, void *data)
2630 switch (GET_CODE (x))
2634 case STRICT_LOW_PART:
2643 /* Can only legitimately appear this early in the context of
2644 stack pushes for function arguments, but handle all of the
2645 codes nonetheless. */
2649 /* Setting a subreg of a register larger than word_mode leaves
2650 the non-written words unchanged. */
2651 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2659 find_used_regs (xptr, data);
2662 /* Try to perform local const/copy propagation on X in INSN. */
2665 do_local_cprop (rtx x, rtx insn)
2667 rtx newreg = NULL, newcnst = NULL;
2669 /* Rule out USE instructions and ASM statements as we don't want to
2670 change the hard registers mentioned. */
2672 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2673 || (GET_CODE (PATTERN (insn)) != USE
2674 && asm_noperands (PATTERN (insn)) < 0)))
2676 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
2677 struct elt_loc_list *l;
2681 for (l = val->locs; l; l = l->next)
2683 rtx this_rtx = l->loc;
2686 if (gcse_constant_p (this_rtx))
2688 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
2689 /* Don't copy propagate if it has an attached REG_EQUIV note.
2690 At this point only function parameters should have
2691 REG_EQUIV notes; if the argument slot is used somewhere
2692 explicitly, it means the address of the parameter has been taken,
2693 so we should not extend the lifetime of the pseudo. */
2694 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2695 || ! MEM_P (XEXP (note, 0))))
2698 if (newcnst && constprop_register (insn, x, newcnst))
2700 if (dump_file != NULL)
2702 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2704 fprintf (dump_file, "insn %d with constant ",
2706 print_rtl (dump_file, newcnst);
2707 fprintf (dump_file, "\n");
2709 local_const_prop_count++;
2712 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2714 if (dump_file != NULL)
2717 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2718 REGNO (x), INSN_UID (insn));
2719 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2721 local_copy_prop_count++;
2728 /* Do local const/copy propagation (i.e. within each basic block). */
2731 local_cprop_pass (void)
2735 struct reg_use *reg_used;
2736 bool changed = false;
2738 cselib_init (false);
2741 FOR_BB_INSNS (bb, insn)
2745 rtx note = find_reg_equal_equiv_note (insn);
2749 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2752 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2754 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2755 reg_used++, reg_use_count--)
2757 if (do_local_cprop (reg_used->reg_rtx, insn))
2763 if (INSN_DELETED_P (insn))
2766 while (reg_use_count);
2768 cselib_process_insn (insn);
2771 /* Forget everything at the end of a basic block. */
2772 cselib_clear_table ();
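/* Hypothetical source-level picture of what local const/copy propagation
   does within one block (example names invented): cselib's value numbering
   supplies the constant and copy locations that do_local_cprop consults.  */

static int
mini_local_cprop_example (void)
{
  int a = 5;
  int b = a;			/* copy-prop rewrites the use below, */
  return b + b;			/* which becomes 5 + 5 and folds to 10 */
}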
2780 /* Similar to get_condition, only the resulting condition must be
2781 valid at JUMP, instead of at EARLIEST.
2783 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2784 settle for the condition variable in the jump instruction being integral.
2785 We prefer to be able to record the value of a user variable, rather than
2786 the value of a temporary used in a condition. This could be solved by
2787 recording the value of *every* register scanned by canonicalize_condition,
2788 but this would require some code reorganization. */
2791 fis_get_condition (rtx jump)
2793 return get_condition (jump, NULL, false, true);
2796 /* Check the comparison COND to see if we can safely form an implicit set from
2797 it. COND is either an EQ or NE comparison. */
2800 implicit_set_cond_p (const_rtx cond)
2802 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2803 const_rtx cst = XEXP (cond, 1);
2805 /* We can't perform this optimization if either operand might be or might
2806 contain a signed zero. */
2807 if (HONOR_SIGNED_ZEROS (mode))
2809 /* It is sufficient to check if CST is or contains a zero. We must
2810 handle float, complex, and vector. If any subpart is a zero, then
2811 the optimization can't be performed. */
2812 /* ??? The complex and vector checks are not implemented yet. We just
2813 always return zero for them. */
2814 if (GET_CODE (cst) == CONST_DOUBLE)
2817 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2818 if (REAL_VALUES_EQUAL (d, dconst0))
2825 return gcse_constant_p (cst);
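/* Why HONOR_SIGNED_ZEROS blocks the optimization: a sketch assuming IEEE
   semantics (hypothetical example, not pass code).  After "d == 0.0"
   succeeds, D may still be -0.0, because -0.0 == +0.0 compares equal, so
   an implicit set "d = +0.0" would change sign-sensitive results.  */

#include <math.h>

static double
mini_signed_zero_hazard (double d)
{
  if (d == 0.0)
    /* D is zero here but its sign is unknown: substituting the constant
       +0.0 would break this call for d == -0.0.  */
    return copysign (1.0, d);
  return d;
}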
2828 /* Find the implicit sets of a function. An "implicit set" is a constraint
2829 on the value of a variable, implied by a conditional jump. For example,
2830 following "if (x == 2)", the then branch may be optimized as though the
2831 conditional performed an "explicit set", in this example, "x = 2". This
2832 function records the set patterns that are implicit at the start of each
2835 FIXME: This would be more effective if critical edges were pre-split. As
2836 it is now, we can't record implicit sets for blocks that have
2837 critical successor edges. This results in missed optimizations
2838 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
2841 find_implicit_sets (void)
2843 basic_block bb, dest;
2849 /* Check for more than one successor. */
2850 if (EDGE_COUNT (bb->succs) > 1)
2852 cond = fis_get_condition (BB_END (bb));
2855 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2856 && REG_P (XEXP (cond, 0))
2857 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2858 && implicit_set_cond_p (cond))
2860 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2861 : FALLTHRU_EDGE (bb)->dest;
2864 /* Record nothing for a critical edge. */
2865 && single_pred_p (dest)
2866 && dest != EXIT_BLOCK_PTR)
2868 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2870 implicit_sets[dest->index] = new_rtx;
2873 fprintf (dump_file, "Implicit set of reg %d in ",
2874 REGNO (XEXP (cond, 0)));
2875 fprintf (dump_file, "basic block %d\n", dest->index);
2883 fprintf (dump_file, "Found %d implicit sets\n", count);
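/* Hypothetical source-level example of an implicit set.  Only an EQ
   comparison against a constant names a single value, so the then-arm
   below gets the pattern (set (reg x) (const_int 2)) recorded at its
   head, letting copy/constant propagation rewrite the call as
   use_value (2); the else-arm gets nothing.  */

extern void use_value (int);

static void
mini_implicit_set_example (int x)
{
  if (x == 2)
    use_value (x);		/* x is known to be 2 on this path */
  else
    use_value (x + 1);		/* "x != 2" names no constant: no set */
}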
2886 /* Bypass conditional jumps. */
2888 /* The value of last_basic_block at the beginning of the jump_bypass
2889 pass. The use of redirect_edge_and_branch_force may introduce new
2890 basic blocks, but the data flow analysis is only valid for basic
2891 block indices less than bypass_last_basic_block. */
2893 static int bypass_last_basic_block;
2895 /* Find a set of REGNO to a constant that is available at the end of basic
2896 block BB. Returns NULL if no such set is found. Based heavily upon
2897 find_avail_set. */
2899 static struct expr *
2900 find_bypass_set (int regno, int bb)
2902 struct expr *result = 0;
2907 struct expr *set = lookup_set (regno, &set_hash_table);
2911 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2913 set = next_set (regno, set);
2919 gcc_assert (GET_CODE (set->expr) == SET);
2921 src = SET_SRC (set->expr);
2922 if (gcse_constant_p (src))
2928 regno = REGNO (src);
2934 /* Subroutine of bypass_block that checks whether a pseudo is killed by
2935 any of the instructions inserted on an edge. Jump bypassing places
2936 condition code setters on CFG edges using insert_insn_on_edge. This
2937 function is required to check that our data flow analysis is still
2938 valid prior to commit_edge_insertions. */
2941 reg_killed_on_edge (const_rtx reg, const_edge e)
2945 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
2946 if (INSN_P (insn) && reg_set_p (reg, insn))
2952 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
2953 basic block BB which has more than one predecessor. If not NULL, SETCC
2954 is the first instruction of BB, which is immediately followed by JUMP_INSN
2955 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
2956 Returns nonzero if a change was made.
2958 During the jump bypassing pass, we may place copies of SETCC instructions
2959 on CFG edges. The following routine must be careful to pay attention to
2960 these inserted insns when performing its transformations. */
2963 bypass_block (basic_block bb, rtx setcc, rtx jump)
2968 int may_be_loop_header;
2972 insn = (setcc != NULL) ? setcc : jump;
2974 /* Determine set of register uses in INSN. */
2976 note_uses (&PATTERN (insn), find_used_regs, NULL);
2977 note = find_reg_equal_equiv_note (insn);
2979 find_used_regs (&XEXP (note, 0), NULL);
2981 may_be_loop_header = false;
2982 FOR_EACH_EDGE (e, ei, bb->preds)
2983 if (e->flags & EDGE_DFS_BACK)
2985 may_be_loop_header = true;
2990 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
2994 if (e->flags & EDGE_COMPLEX)
3000 /* We can't redirect edges from new basic blocks. */
3001 if (e->src->index >= bypass_last_basic_block)
3007 /* The irreducible loops created by redirecting edges entering the
3008 loop from outside would decrease the effectiveness of some of the
3009 following optimizations, so prevent this. */
3010 if (may_be_loop_header
3011 && !(e->flags & EDGE_DFS_BACK))
3017 for (i = 0; i < reg_use_count; i++)
3019 struct reg_use *reg_used = &reg_use_table[i];
3020 unsigned int regno = REGNO (reg_used->reg_rtx);
3021 basic_block dest, old_dest;
3025 set = find_bypass_set (regno, e->src->index);
3030 /* Check the data flow is valid after edge insertions. */
3031 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3034 src = SET_SRC (pc_set (jump));
3037 src = simplify_replace_rtx (src,
3038 SET_DEST (PATTERN (setcc)),
3039 SET_SRC (PATTERN (setcc)));
3041 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3042 SET_SRC (set->expr));
3044 /* Jump bypassing may have already placed instructions on
3045 edges of the CFG. We can't bypass an outgoing edge that
3046 has instructions associated with it, as these insns won't
3047 get executed if the incoming edge is redirected. */
3049 if (new_rtx == pc_rtx)
3051 edest = FALLTHRU_EDGE (bb);
3052 dest = edest->insns.r ? NULL : edest->dest;
3054 else if (GET_CODE (new_rtx) == LABEL_REF)
3056 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3057 /* Don't bypass edges containing instructions. */
3058 edest = find_edge (bb, dest);
3059 if (edest && edest->insns.r)
3065 /* Avoid unification of the edge with other edges from original
3066 branch. We would end up emitting the instruction on "both"
3067 edges. */
3069 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3070 && find_edge (e->src, dest))
3076 && dest != EXIT_BLOCK_PTR)
3078 redirect_edge_and_branch_force (e, dest);
3080 /* Copy the register setter to the redirected edge.
3081 Don't copy CC0 setters, as CC0 is dead after jump. */
3084 rtx pat = PATTERN (setcc);
3085 if (!CC0_P (SET_DEST (pat)))
3086 insert_insn_on_edge (copy_insn (pat), e);
3089 if (dump_file != NULL)
3091 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3092 "in jump_insn %d equals constant ",
3093 regno, INSN_UID (jump));
3094 print_rtl (dump_file, SET_SRC (set->expr));
3095 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3096 e->src->index, old_dest->index, dest->index);
3109 /* Find basic blocks with more than one predecessor that only contain a
3110 single conditional jump. If the result of the comparison is known at
3111 compile-time from any incoming edge, redirect that edge to the
3112 appropriate target. Returns nonzero if a change was made.
3114 This function is now mis-named, because we also handle indirect jumps. */
3117 bypass_conditional_jumps (void)
3125 /* Note we start at block 1. */
3126 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3129 bypass_last_basic_block = last_basic_block;
3130 mark_dfs_back_edges ();
3133 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3134 EXIT_BLOCK_PTR, next_bb)
3136 /* Check for more than one predecessor. */
3137 if (!single_pred_p (bb))
3140 FOR_BB_INSNS (bb, insn)
3141 if (NONJUMP_INSN_P (insn))
3145 if (GET_CODE (PATTERN (insn)) != SET)
3148 dest = SET_DEST (PATTERN (insn));
3149 if (REG_P (dest) || CC0_P (dest))
3154 else if (JUMP_P (insn))
3156 if ((any_condjump_p (insn) || computed_jump_p (insn))
3157 && onlyjump_p (insn))
3158 changed |= bypass_block (bb, setcc, insn);
3161 else if (INSN_P (insn))
3166 /* If we bypassed any register setting insns, we inserted a
3167 copy on the redirected edge. These need to be committed. */
3169 commit_edge_insertions ();
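/* Hypothetical source-level picture of jump bypassing (example names
   invented).  cprop_avout proves "x == 0" on the edge leaving the then-arm
   below, so that incoming edge of the second test is redirected straight
   to its else-target, skipping a comparison whose outcome is known.  */

extern int mini_compute (void);
extern void mini_then_path (void);
extern void mini_else_path (void);

static void
mini_bypass_example (int p)
{
  int x;

  if (p)
    x = 0;			/* the test below is known false here */
  else
    x = mini_compute ();
  if (x)
    mini_then_path ();
  else
    mini_else_path ();		/* the x = 0 edge can branch here directly */
}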
3174 /* Compute PRE+LCM working variables. */
3176 /* Local properties of expressions. */
3177 /* Nonzero for expressions that are transparent in the block. */
3178 static sbitmap *transp;
3180 /* Nonzero for expressions that are transparent at the end of the block.
3181 This is only zero for expressions killed by an abnormal critical edge
3182 created by a call. */
3183 static sbitmap *transpout;
3185 /* Nonzero for expressions that are computed (available) in the block. */
3186 static sbitmap *comp;
3188 /* Nonzero for expressions that are locally anticipatable in the block. */
3189 static sbitmap *antloc;
3191 /* Nonzero for expressions where this block is an optimal computation
3192 point. */
3193 static sbitmap *pre_optimal;
3195 /* Nonzero for expressions which are redundant in a particular block. */
3196 static sbitmap *pre_redundant;
3198 /* Nonzero for expressions which should be inserted on a specific edge. */
3199 static sbitmap *pre_insert_map;
3201 /* Nonzero for expressions which should be deleted in a specific block. */
3202 static sbitmap *pre_delete_map;
3204 /* Contains the edge_list returned by pre_edge_lcm. */
3205 static struct edge_list *edge_list;
3207 /* Allocate vars used for PRE analysis. */
3210 alloc_pre_mem (int n_blocks, int n_exprs)
3212 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3213 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3214 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3217 pre_redundant = NULL;
3218 pre_insert_map = NULL;
3219 pre_delete_map = NULL;
3220 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3222 /* pre_insert and pre_delete are allocated later. */
3225 /* Free vars used for PRE analysis. */
3230 sbitmap_vector_free (transp);
3231 sbitmap_vector_free (comp);
3233 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3236 sbitmap_vector_free (pre_optimal);
3238 sbitmap_vector_free (pre_redundant);
3240 sbitmap_vector_free (pre_insert_map);
3242 sbitmap_vector_free (pre_delete_map);
3244 transp = comp = NULL;
3245 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3248 /* Top level routine to do the dataflow analysis needed by PRE. */
3251 compute_pre_data (void)
3253 sbitmap trapping_expr;
3257 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3258 sbitmap_vector_zero (ae_kill, last_basic_block);
3260 /* Collect expressions which might trap. */
3261 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3262 sbitmap_zero (trapping_expr);
3263 for (ui = 0; ui < expr_hash_table.size; ui++)
3266 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3267 if (may_trap_p (e->expr))
3268 SET_BIT (trapping_expr, e->bitmap_index);
3271 /* Compute ae_kill for each basic block using:
3272
3273 ~(TRANSP | COMP)
3274 */
3281 /* If the current block is the destination of an abnormal edge, we
3282 kill all trapping expressions because we won't be able to properly
3283 place the instruction on the edge. So make them neither
3284 anticipatable nor transparent. This is fairly conservative. */
3285 FOR_EACH_EDGE (e, ei, bb->preds)
3286 if (e->flags & EDGE_ABNORMAL)
3288 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3289 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3293 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3294 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3297 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3298 ae_kill, &pre_insert_map, &pre_delete_map);
3299 sbitmap_vector_free (antloc);
3301 sbitmap_vector_free (ae_kill);
3303 sbitmap_free (trapping_expr);
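/* At this point pre_edge_lcm has produced the two LCM outputs consumed
   below: pre_insert_map (expressions to add on specific edges) and
   pre_delete_map (computations made fully redundant, to be replaced by
   copies from reaching_reg); antloc and ae_kill have served as inputs to
   the LCM equations and are freed above.  */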
3308 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3309 block BB.
3311 VISITED is a pointer to a working buffer for tracking which BB's have
3312 been visited. It is NULL for the top-level call.
3314 We treat reaching expressions that go through blocks containing the same
3315 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3316 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3317 2 as not reaching. The intent is to improve the probability of finding
3318 only one reaching expression and to reduce register lifetimes by picking
3319 the closest such expression. */
3322 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3327 FOR_EACH_EDGE (pred, ei, bb->preds)
3329 basic_block pred_bb = pred->src;
3331 if (pred->src == ENTRY_BLOCK_PTR
3332 /* Has this predecessor already been visited? */
3333 || visited[pred_bb->index])
3334 ;/* Nothing to do. */
3336 /* Does this predecessor generate this expression? */
3337 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3339 /* Is this the occurrence we're looking for?
3340 Note that there's only one generating occurrence per block
3341 so we just need to check the block number. */
3342 if (occr_bb == pred_bb)
3345 visited[pred_bb->index] = 1;
3347 /* Ignore this predecessor if it kills the expression. */
3348 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3349 visited[pred_bb->index] = 1;
3351 /* Neither gen nor kill. */
3354 visited[pred_bb->index] = 1;
3355 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3360 /* All paths have been checked. */
3364 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
3365 memory allocated for that function is returned. */
3368 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3371 char *visited = XCNEWVEC (char, last_basic_block);
3373 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3380 /* Given an expr, generate RTL which we can insert at the end of a BB,
3381 or on an edge. Set the block number of any insns generated to
3382 the value of BB. */
3385 process_insert_insn (struct expr *expr)
3387 rtx reg = expr->reaching_reg;
3388 rtx exp = copy_rtx (expr->expr);
3393 /* If the expression is something that's an operand, like a constant,
3394 just copy it to a register. */
3395 if (general_operand (exp, GET_MODE (reg)))
3396 emit_move_insn (reg, exp);
3398 /* Otherwise, make a new insn to compute this expression and make sure the
3399 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3400 expression to make sure we don't have any sharing issues. */
3403 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3405 if (insn_invalid_p (insn))
3416 /* Add EXPR to the end of basic block BB.
3418 This is used by both the PRE and code hoisting.
3420 For PRE, we want to verify that the expr is either transparent
3421 or locally anticipatable in the target block. This check makes
3422 no sense for code hoisting. */
3425 insert_insn_end_basic_block (struct expr *expr, basic_block bb, int pre)
3427 rtx insn = BB_END (bb);
3429 rtx reg = expr->reaching_reg;
3430 int regno = REGNO (reg);
3433 pat = process_insert_insn (expr);
3434 gcc_assert (pat && INSN_P (pat));
3437 while (NEXT_INSN (pat_end) != NULL_RTX)
3438 pat_end = NEXT_INSN (pat_end);
3440 /* If the last insn is a jump, insert EXPR in front [taking care to
3441 handle cc0, etc. properly]. Similarly we need to take care of trapping
3442 instructions in the presence of non-call exceptions. */
3445 || (NONJUMP_INSN_P (insn)
3446 && (!single_succ_p (bb)
3447 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3452 /* It should always be the case that we can put these instructions
3453 anywhere in the basic block when performing PRE optimizations.
3454 Check this. */
3455 gcc_assert (!NONJUMP_INSN_P (insn) || !pre
3456 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3457 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3459 /* If this is a jump table, then we can't insert stuff here. Since
3460 we know the previous real insn must be the tablejump, we insert
3461 the new instruction just before the tablejump. */
3462 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3463 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3464 insn = prev_real_insn (insn);
3467 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3468 if cc0 isn't set. */
3469 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3471 insn = XEXP (note, 0);
3474 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3475 if (maybe_cc0_setter
3476 && INSN_P (maybe_cc0_setter)
3477 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3478 insn = maybe_cc0_setter;
3481 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3482 new_insn = emit_insn_before_noloc (pat, insn, bb);
3485 /* Likewise if the last insn is a call, as will happen in the presence
3486 of exception handling. */
3487 else if (CALL_P (insn)
3488 && (!single_succ_p (bb)
3489 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3491 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
3492 we search backward and place the instructions before the first
3493 parameter is loaded. Do this for everyone for consistency and a
3494 presumption that we'll get better code elsewhere as well.
3496 It should always be the case that we can put these instructions
3497 anywhere in the basic block when performing PRE optimizations.
3498 Check this. */
3501 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
3502 || TEST_BIT (transp[bb->index], expr->bitmap_index));
3504 /* Since different machines initialize their parameter registers
3505 in different orders, assume nothing. Collect the set of all
3506 parameter registers. */
3507 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3509 /* If we found all the parameter loads, then we want to insert
3510 before the first parameter load.
3512 If we did not find all the parameter loads, then we might have
3513 stopped on the head of the block, which could be a CODE_LABEL.
3514 If we inserted before the CODE_LABEL, then we would be putting
3515 the insn in the wrong basic block. In that case, put the insn
3516 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3517 while (LABEL_P (insn)
3518 || NOTE_INSN_BASIC_BLOCK_P (insn))
3519 insn = NEXT_INSN (insn);
3521 new_insn = emit_insn_before_noloc (pat, insn, bb);
3524 new_insn = emit_insn_after_noloc (pat, insn, bb);
3529 add_label_notes (PATTERN (pat), new_insn);
3532 pat = NEXT_INSN (pat);
3535 gcse_create_count++;
3539 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3540 bb->index, INSN_UID (new_insn));
3541 fprintf (dump_file, "copying expression %d to reg %d\n",
3542 expr->bitmap_index, regno);
3546 /* Insert partially redundant expressions on edges in the CFG to make
3547 the expressions fully redundant. */
3550 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3552 int e, i, j, num_edges, set_size, did_insert = 0;
3555 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3556 if it reaches any of the deleted expressions. */
3558 set_size = pre_insert_map[0]->size;
3559 num_edges = NUM_EDGES (edge_list);
3560 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3561 sbitmap_vector_zero (inserted, num_edges);
3563 for (e = 0; e < num_edges; e++)
3566 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3568 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3570 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3572 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3573 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3575 struct expr *expr = index_map[j];
3578 /* Now look at each deleted occurrence of this expression. */
3579 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3581 if (! occr->deleted_p)
3584 /* Insert this expression on this edge if it would
3585 reach the deleted occurrence in BB. */
3586 if (!TEST_BIT (inserted[e], j))
3589 edge eg = INDEX_EDGE (edge_list, e);
3591 /* We can't insert anything on an abnormal and
3592 critical edge, so we insert the insn at the end of
3593 the previous block. There are several alternatives
3594 detailed in Morgan's book P277 (sec 10.5) for
3595 handling this situation. This one is the easiest for
3596 now. */
3598 if (eg->flags & EDGE_ABNORMAL)
3599 insert_insn_end_basic_block (index_map[j], bb, 0);
3602 insn = process_insert_insn (index_map[j]);
3603 insert_insn_on_edge (insn, eg);
3608 fprintf (dump_file, "PRE: edge (%d,%d), ",
3610 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3611 fprintf (dump_file, "copy expression %d\n",
3612 expr->bitmap_index);
3615 update_ld_motion_stores (expr);
3616 SET_BIT (inserted[e], j);
3618 gcse_create_count++;
3625 sbitmap_vector_free (inserted);
3629 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3630 Given "old_reg <- expr" (INSN), instead of adding after it
3631 reaching_reg <- old_reg
3632 it's better to do the following:
3633 reaching_reg <- expr
3634 old_reg <- reaching_reg
3635 because this way copy propagation can discover additional PRE
3636 opportunities. But if this fails, we try the old way.
3637 When "expr" is a store, i.e.
3638 given "MEM <- old_reg", instead of adding after it
3639 reaching_reg <- old_reg
3640 it's better to add it before as follows:
3641 reaching_reg <- old_reg
3642 MEM <- reaching_reg. */
3645 pre_insert_copy_insn (struct expr *expr, rtx insn)
3647 rtx reg = expr->reaching_reg;
3648 int regno = REGNO (reg);
3649 int indx = expr->bitmap_index;
3650 rtx pat = PATTERN (insn);
3651 rtx set, first_set, new_insn;
3655 /* This block matches the logic in hash_scan_insn. */
3656 switch (GET_CODE (pat))
3663 /* Search through the parallel looking for the set whose
3664 source was the expression that we're interested in. */
3665 first_set = NULL_RTX;
3667 for (i = 0; i < XVECLEN (pat, 0); i++)
3669 rtx x = XVECEXP (pat, 0, i);
3670 if (GET_CODE (x) == SET)
3672 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3673 may not find an equivalent expression, but in this
3674 case the PARALLEL will have a single set. */
3675 if (first_set == NULL_RTX)
3677 if (expr_equiv_p (SET_SRC (x), expr->expr))
3685 gcc_assert (first_set);
3686 if (set == NULL_RTX)
3694 if (REG_P (SET_DEST (set)))
3696 old_reg = SET_DEST (set);
3697 /* Check if we can modify the set destination in the original insn. */
3698 if (validate_change (insn, &SET_DEST (set), reg, 0))
3700 new_insn = gen_move_insn (old_reg, reg);
3701 new_insn = emit_insn_after (new_insn, insn);
3705 new_insn = gen_move_insn (reg, old_reg);
3706 new_insn = emit_insn_after (new_insn, insn);
3709 else /* This is possible only in case of a store to memory. */
3711 old_reg = SET_SRC (set);
3712 new_insn = gen_move_insn (reg, old_reg);
3714 /* Check if we can modify the set source in the original insn. */
3715 if (validate_change (insn, &SET_SRC (set), reg, 0))
3716 new_insn = emit_insn_before (new_insn, insn);
3718 new_insn = emit_insn_after (new_insn, insn);
3721 gcse_create_count++;
3725 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3726 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
3727 INSN_UID (insn), regno);
3730 /* Copy available expressions that reach the redundant expression
3731 to `reaching_reg'. */
3734 pre_insert_copies (void)
3736 unsigned int i, added_copy;
3741 /* For each available expression in the table, copy the result to
3742 `reaching_reg' if the expression reaches a deleted one.
3744 ??? The current algorithm is rather brute force.
3745 Need to do some profiling. */
3747 for (i = 0; i < expr_hash_table.size; i++)
3748 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3750 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3751 we don't want to insert a copy here because the expression may not
3752 really be redundant. So only insert an insn if the expression was
3753 deleted. This test also avoids further processing if the
3754 expression wasn't deleted anywhere. */
3755 if (expr->reaching_reg == NULL)
3758 /* Set when we add a copy for that expression. */
3761 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3763 if (! occr->deleted_p)
3766 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3768 rtx insn = avail->insn;
3770 /* No need to handle this one if handled already. */
3771 if (avail->copied_p)
3774 /* Don't handle this one if it's a redundant one. */
3775 if (INSN_DELETED_P (insn))
3778 /* Or if the expression doesn't reach the deleted one. */
3779 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3781 BLOCK_FOR_INSN (occr->insn)))
3786 /* Copy the result of avail to reaching_reg. */
3787 pre_insert_copy_insn (expr, insn);
3788 avail->copied_p = 1;
3793 update_ld_motion_stores (expr);
3797 /* Emit move from SRC to DEST noting the equivalence with expression computed
3798 in INSN. */
3800 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3803 rtx set = single_set (insn), set2;
3807 /* This should never fail since we're creating a reg->reg copy
3808 we've verified to be valid. */
3810 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3812 /* Note the equivalence for local CSE pass. */
3813 set2 = single_set (new_rtx);
3814 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3816 if ((note = find_reg_equal_equiv_note (insn)))
3817 eqv = XEXP (note, 0);
3819 eqv = SET_SRC (set);
3821 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
3826 /* Delete redundant computations.
3827 Deletion is done by changing the insn to copy the `reaching_reg' of
3828 the expression into the result of the SET. It is left to later passes
3829 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
3831 Returns nonzero if a change is made. */
3842 for (i = 0; i < expr_hash_table.size; i++)
3843 for (expr = expr_hash_table.table[i];
3845 expr = expr->next_same_hash)
3847 int indx = expr->bitmap_index;
3849 /* We only need to search antic_occr since we require
3850 ANTLOC != 0. */
3852 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3854 rtx insn = occr->insn;
3856 basic_block bb = BLOCK_FOR_INSN (insn);
3858 /* We only delete insns that have a single_set. */
3859 if (TEST_BIT (pre_delete_map[bb->index], indx)
3860 && (set = single_set (insn)) != 0
3861 && dbg_cnt (pre_insn))
3863 /* Create a pseudo-reg to store the result of reaching
3864 expressions into. Get the mode for the new pseudo from
3865 the mode of the original destination pseudo. */
3866 if (expr->reaching_reg == NULL)
3867 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
3869 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
3871 occr->deleted_p = 1;
3878 "PRE: redundant insn %d (expression %d) in ",
3879 INSN_UID (insn), indx);
3880 fprintf (dump_file, "bb %d, reaching reg is %d\n",
3881 bb->index, REGNO (expr->reaching_reg));
3890 /* Perform GCSE optimizations using PRE.
3891 This is called by one_pre_gcse_pass after all the dataflow analysis
3892 has been done.
3894 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
3895 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
3896 Compiler Design and Implementation.
3898 ??? A new pseudo reg is created to hold the reaching expression. The nice
3899 thing about the classical approach is that it would try to use an existing
3900 reg. If the register can't be adequately optimized [i.e. we introduce
3901 reload problems], one could add a pass here to propagate the new register
3904 ??? We don't handle single sets in PARALLELs because we're [currently] not
3905 able to copy the rest of the parallel when we insert copies to create full
3906 redundancies from partial redundancies. However, there's no reason why we
3907 can't handle PARALLELs in the cases where there are no partial
3908 redundancies. */
3914 int did_insert, changed;
3915 struct expr **index_map;
3918 /* Compute a mapping from expression number (`bitmap_index') to
3919 hash table entry. */
3921 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3922 for (i = 0; i < expr_hash_table.size; i++)
3923 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3924 index_map[expr->bitmap_index] = expr;
3926 /* Delete the redundant insns first so that
3927 - we know what register to use for the new insns and for the other
3928 ones with reaching expressions
3929 - we know which insns are redundant when we go to create copies */
3931 changed = pre_delete ();
3932 did_insert = pre_edge_insert (edge_list, index_map);
3934 /* In other places with reaching expressions, copy the expression to the
3935 specially allocated pseudo-reg that reaches the redundant expr. */
3936 pre_insert_copies ();
3939 commit_edge_insertions ();
3947 /* Top level routine to perform one PRE GCSE pass.
3949 Return nonzero if a change was made. */
3952 one_pre_gcse_pass (void)
3956 gcse_subst_count = 0;
3957 gcse_create_count = 0;
3959 /* Return if there's nothing to do, or it is too expensive. */
3960 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3961 || is_too_expensive (_("PRE disabled")))
3964 /* We need alias. */
3965 init_alias_analysis ();
3968 gcc_obstack_init (&gcse_obstack);
3971 alloc_hash_table (get_max_uid (), &expr_hash_table, 0);
3972 add_noreturn_fake_exit_edges ();
3974 compute_ld_motion_mems ();
3976 compute_hash_table (&expr_hash_table);
3977 trim_ld_motion_mems ();
3979 dump_hash_table (dump_file, "Expression", &expr_hash_table);
3981 if (expr_hash_table.n_elems > 0)
3983 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
3984 compute_pre_data ();
3985 changed |= pre_gcse ();
3986 free_edge_list (edge_list);
3991 remove_fake_exit_edges ();
3992 free_hash_table (&expr_hash_table);
3995 obstack_free (&gcse_obstack, NULL);
3997 /* We are finished with alias. */
3998 end_alias_analysis ();
4002 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4003 current_function_name (), n_basic_blocks, bytes_used);
4004 fprintf (dump_file, "%d substs, %d insns created\n",
4005 gcse_subst_count, gcse_create_count);
4011 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4012 to INSN. If such notes are added to an insn which references a
4013 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4014 that note, because the following loop optimization pass requires
4015 them. */
4017 /* ??? If there was a jump optimization pass after gcse and before loop,
4018 then we would not need to do this here, because jump would add the
4019 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4022 add_label_notes (rtx x, rtx insn)
4024 enum rtx_code code = GET_CODE (x);
4028 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4030 /* This code used to ignore labels that referred to dispatch tables to
4031 avoid flow generating (slightly) worse code.
4033 We no longer ignore such label references (see LABEL_REF handling in
4034 mark_jump_label for additional information). */
4036 /* There's no reason for current users to emit jump-insns with
4037 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
4038 notes. */
4039 gcc_assert (!JUMP_P (insn));
4040 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4042 if (LABEL_P (XEXP (x, 0)))
4043 LABEL_NUSES (XEXP (x, 0))++;
4048 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4051 add_label_notes (XEXP (x, i), insn);
4052 else if (fmt[i] == 'E')
4053 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4054 add_label_notes (XVECEXP (x, i, j), insn);
4058 /* Compute transparent outgoing information for each block.
4060 An expression is transparent to an edge unless it is killed by
4061 the edge itself. This can only happen with abnormal control flow,
4062 when the edge is traversed through a call. This happens with
4063 non-local labels and exceptions.
4065 This would not be necessary if we split the edge. While this is
4066 normally impossible for abnormal critical edges, with some effort
4067 it should be possible with exception handling, since we still have
4068 control over which handler should be invoked. But due to increased
4069 EH table sizes, this may not be worthwhile. */
4072 compute_transpout (void)
4078 sbitmap_vector_ones (transpout, last_basic_block);
4082 /* Note that flow inserted a nop at the end of basic blocks that
4083 end in call instructions for reasons other than abnormal
4084 control flow. */
4085 if (! CALL_P (BB_END (bb)))
4088 for (i = 0; i < expr_hash_table.size; i++)
4089 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4090 if (MEM_P (expr->expr))
4092 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4093 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4096 /* ??? Optimally, we would use interprocedural alias
4097 analysis to determine if this mem is actually killed
4098 by this call. */
4099 RESET_BIT (transpout[bb->index], expr->bitmap_index);
4104 /* Code Hoisting variables and subroutines. */
4106 /* Very busy expressions. */
4107 static sbitmap *hoist_vbein;
4108 static sbitmap *hoist_vbeout;
4110 /* Hoistable expressions. */
4111 static sbitmap *hoist_exprs;
4113 /* ??? We could compute post dominators and run this algorithm in
4114 reverse to perform tail merging; doing so would probably be
4115 more effective than the tail merging code in jump.c.
4117 It's unclear if tail merging could be run in parallel with
4118 code hoisting. It would be nice. */
4120 /* Allocate vars used for code hoisting analysis. */
4123 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4125 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4126 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4127 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4129 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4130 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4131 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4132 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4135 /* Free vars used for code hoisting analysis. */
4138 free_code_hoist_mem (void)
4140 sbitmap_vector_free (antloc);
4141 sbitmap_vector_free (transp);
4142 sbitmap_vector_free (comp);
4144 sbitmap_vector_free (hoist_vbein);
4145 sbitmap_vector_free (hoist_vbeout);
4146 sbitmap_vector_free (hoist_exprs);
4147 sbitmap_vector_free (transpout);
4149 free_dominance_info (CDI_DOMINATORS);
4152 /* Compute the very busy expressions at entry/exit from each block.
4154 An expression is very busy if all paths from a given point
4155 compute the expression. */
4158 compute_code_hoist_vbeinout (void)
4160 int changed, passes;
4163 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4164 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4173 /* We scan the blocks in the reverse order to speed up
4174 the convergence. */
4175 FOR_EACH_BB_REVERSE (bb)
4177 if (bb->next_bb != EXIT_BLOCK_PTR)
4178 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
4179 hoist_vbein, bb->index);
4181 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
4183 hoist_vbeout[bb->index],
4191 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
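/* A sketch of the backward equations iterated above, reusing the MINI_NBB
   model from the availability sketch earlier (hypothetical, one word per
   block):

     VBEOUT[b] = intersection of VBEIN[s] over successors s (empty at exit)
     VBEIN[b]  = ANTLOC[b] | (VBEOUT[b] & TRANSP[b])

   Scanning blocks in reverse order propagates facts against the flow of
   control, which is why the pass above converges in fewer passes.  */

static void
mini_compute_vbeinout (const unsigned int *antloc, const unsigned int *transp,
		       unsigned int *vbein, unsigned int *vbeout,
		       const unsigned char succ[MINI_NBB][MINI_NBB])
{
  int b, s, changed = 1;

  for (b = 0; b < MINI_NBB; b++)
    vbein[b] = vbeout[b] = 0;

  while (changed)
    {
      changed = 0;
      for (b = MINI_NBB - 1; b >= 0; b--)
	{
	  unsigned int out = ~0U, in;
	  int any_succ = 0;

	  for (s = 0; s < MINI_NBB; s++)
	    if (succ[b][s])
	      {
		out &= vbein[s];
		any_succ = 1;
	      }
	  if (! any_succ)
	    out = 0;		/* nothing is very busy past the exit */
	  in = antloc[b] | (out & transp[b]);
	  if (in != vbein[b] || out != vbeout[b])
	    {
	      vbein[b] = in;
	      vbeout[b] = out;
	      changed = 1;
	    }
	}
    }
}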
4194 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4197 compute_code_hoist_data (void)
4199 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4200 compute_transpout ();
4201 compute_code_hoist_vbeinout ();
4202 calculate_dominance_info (CDI_DOMINATORS);
4204 fprintf (dump_file, "\n");
4207 /* Determine if the expression identified by EXPR_INDEX would
4208 reach BB unimpaired if it was placed at the end of EXPR_BB.
4210 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4211 to me that the expression must either be computed or transparent in
4212 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4213 would allow the expression to be hoisted out of loops, even if
4214 the expression wasn't a loop invariant.
4216 Contrast this to reachability for PRE where an expression is
4217 considered reachable if *any* path reaches instead of *all*
4218 paths. */
4221 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4225 int visited_allocated_locally = 0;
4228 if (visited == NULL)
4230 visited_allocated_locally = 1;
4231 visited = XCNEWVEC (char, last_basic_block);
4234 FOR_EACH_EDGE (pred, ei, bb->preds)
4236 basic_block pred_bb = pred->src;
4238 if (pred->src == ENTRY_BLOCK_PTR)
4240 else if (pred_bb == expr_bb)
4242 else if (visited[pred_bb->index])
4245 /* Does this predecessor generate this expression? */
4246 else if (TEST_BIT (comp[pred_bb->index], expr_index))
4248 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4254 visited[pred_bb->index] = 1;
4255 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4260 if (visited_allocated_locally)
4263 return (pred == NULL);
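/* Note the contrast with pre_expr_reaches_here_p_work: hoisting demands
   that *every* predecessor path back to EXPR_BB be computed-or-transparent.
   Mechanically, any failing predecessor breaks out of FOR_EACH_EDGE with
   PRED non-null, so (pred == NULL) above is true exactly when no path
   vetoed the hoist.  */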
4266 /* Actually perform code hoisting. */
4271 basic_block bb, dominated;
4272 VEC (basic_block, heap) *domby;
4274 struct expr **index_map;
4278 sbitmap_vector_zero (hoist_exprs, last_basic_block);
4280 /* Compute a mapping from expression number (`bitmap_index') to
4281 hash table entry. */
4283 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4284 for (i = 0; i < expr_hash_table.size; i++)
4285 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4286 index_map[expr->bitmap_index] = expr;
/* Walk over each basic block looking for potentially hoistable
   expressions; nothing gets hoisted from the entry block. */
4293 int insn_inserted_p;
4295 domby = get_dominated_by (CDI_DOMINATORS, bb);
4296 /* Examine each expression that is very busy at the exit of this
4297 block. These are the potentially hoistable expressions. */
4298 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4302 if (TEST_BIT (hoist_vbeout[bb->index], i)
4303 && TEST_BIT (transpout[bb->index], i))
4305 /* We've found a potentially hoistable expression, now
4306 we look at every block BB dominates to see if it
4307 computes the expression. */
4308 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4310 /* Ignore self dominance. */
4311 if (bb == dominated)
4313 /* We've found a dominated block, now see if it computes
4314 the busy expression and whether or not moving that
4315 expression to the "beginning" of that block is safe. */
4316 if (!TEST_BIT (antloc[dominated->index], i))
/* Note if the expression would reach the dominated block
   unimpaired if it was placed at the end of BB.
4322 Keep track of how many times this expression is hoistable
4323 from a dominated block into BB. */
4324 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4328 /* If we found more than one hoistable occurrence of this
4329 expression, then note it in the bitmap of expressions to
4330 hoist. It makes no sense to hoist things which are computed
4331 in only one BB, and doing so tends to pessimize register
4332 allocation. One could increase this value to try harder
4333 to avoid any possible code expansion due to register
4334 allocation issues; however experiments have shown that
4335 the vast majority of hoistable expressions are only movable
4336 from two successors, so raising this threshold is likely
4337 to nullify any benefit we get from code hoisting. */
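/* Concretely (an illustrative sketch): with the threshold at "more than
   one", an expression hoistable from two dominated blocks into BB trades
   one new computation in BB for two deleted ones, while an expression
   computed only once would gain nothing and merely lengthen the live
   range of the new pseudo. */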
4340 SET_BIT (hoist_exprs[bb->index], i);
4345 /* If we found nothing to hoist, then quit now. */
4348 VEC_free (basic_block, heap, domby);
4352 /* Loop over all the hoistable expressions. */
4353 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4355 /* We want to insert the expression into BB only once, so
4356 note when we've inserted it. */
4357 insn_inserted_p = 0;
4359 /* These tests should be the same as the tests above. */
4360 if (TEST_BIT (hoist_exprs[bb->index], i))
4362 /* We've found a potentially hoistable expression, now
4363 we look at every block BB dominates to see if it
4364 computes the expression. */
4365 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4367 /* Ignore self dominance. */
4368 if (bb == dominated)
4371 /* We've found a dominated block, now see if it computes
4372 the busy expression and whether or not moving that
4373 expression to the "beginning" of that block is safe. */
4374 if (!TEST_BIT (antloc[dominated->index], i))
4377 /* The expression is computed in the dominated block and
4378 it would be safe to compute it at the start of the
4379 dominated block. Now we have to determine if the
4380 expression would reach the dominated block if it was
4381 placed at the end of BB. */
4382 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4384 struct expr *expr = index_map[i];
4385 struct occr *occr = expr->antic_occr;
4389 /* Find the right occurrence of this expression. */
while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4395 set = single_set (insn);
4398 /* Create a pseudo-reg to store the result of reaching
4399 expressions into. Get the mode for the new pseudo
4400 from the mode of the original destination pseudo. */
if (expr->reaching_reg == NULL)
  expr->reaching_reg
    = gen_reg_rtx_and_attrs (SET_DEST (set));
4405 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4407 occr->deleted_p = 1;
4411 if (!insn_inserted_p)
4413 insert_insn_end_basic_block (index_map[i], bb, 0);
4414 insn_inserted_p = 1;
4420 VEC_free (basic_block, heap, domby);
4428 /* Top level routine to perform one code hoisting (aka unification) pass
4430 Return nonzero if a change was made. */
4433 one_code_hoisting_pass (void)
4437 gcse_subst_count = 0;
4438 gcse_create_count = 0;
4440 /* Return if there's nothing to do, or it is too expensive. */
4441 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4442 || is_too_expensive (_("GCSE disabled")))
/* We need alias analysis. */
4446 init_alias_analysis ();
4449 gcc_obstack_init (&gcse_obstack);
4452 alloc_hash_table (get_max_uid (), &expr_hash_table, 0);
4453 compute_hash_table (&expr_hash_table);
dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
4457 if (expr_hash_table.n_elems > 0)
4459 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4460 compute_code_hoist_data ();
4461 changed = hoist_code ();
4462 free_code_hoist_mem ();
4465 free_hash_table (&expr_hash_table);
4467 obstack_free (&gcse_obstack, NULL);
/* We are finished with alias analysis. */
4470 end_alias_analysis ();
4474 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
4475 current_function_name (), n_basic_blocks, bytes_used);
4476 fprintf (dump_file, "%d substs, %d insns created\n",
4477 gcse_subst_count, gcse_create_count);
/* Here we provide the things required to do store motion towards
   the exit. In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to itself.
4490 void foo(float scale)
4492 for (i=0; i<10; i++)
   'i' is both loaded and stored to in the loop. Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge-based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.
4504 Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop. */
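/* Schematically (a sketch, not actual compiler output):

     before:                      after load motion + store motion:

       loop:                        reg = i;        (load hoisted out)
         ... = i;   (load)          loop:
         i = ...;   (store)           ... = reg;
         goto loop                    reg = ...;
                                      goto loop
                                    i = reg;        (store pushed to exit)  */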
4509 pre_ldst_expr_hash (const void *p)
4511 int do_not_record_p = 0;
4512 const struct ls_expr *const x = (const struct ls_expr *) p;
4513 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
4517 pre_ldst_expr_eq (const void *p1, const void *p2)
4519 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
4520 *const ptr2 = (const struct ls_expr *) p2;
4521 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
4524 /* This will search the ldst list for a matching expression. If it
4525 doesn't find one, we create one and initialize it. */
4527 static struct ls_expr *
4530 int do_not_record_p = 0;
4531 struct ls_expr * ptr;
4536 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
4537 NULL, /*have_reg_qty=*/false);
4540 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
4542 return (struct ls_expr *)*slot;
4544 ptr = XNEW (struct ls_expr);
4546 ptr->next = pre_ldst_mems;
4549 ptr->pattern_regs = NULL_RTX;
4550 ptr->loads = NULL_RTX;
4551 ptr->stores = NULL_RTX;
4552 ptr->reaching_reg = NULL_RTX;
4555 ptr->hash_index = hash;
4556 pre_ldst_mems = ptr;
4562 /* Free up an individual ldst entry. */
4565 free_ldst_entry (struct ls_expr * ptr)
4567 free_INSN_LIST_list (& ptr->loads);
4568 free_INSN_LIST_list (& ptr->stores);
4573 /* Free up all memory associated with the ldst list. */
4576 free_ldst_mems (void)
4579 htab_delete (pre_ldst_table);
4580 pre_ldst_table = NULL;
4582 while (pre_ldst_mems)
4584 struct ls_expr * tmp = pre_ldst_mems;
4586 pre_ldst_mems = pre_ldst_mems->next;
4588 free_ldst_entry (tmp);
4591 pre_ldst_mems = NULL;
4594 /* Dump debugging info about the ldst list. */
4597 print_ldst_list (FILE * file)
4599 struct ls_expr * ptr;
4601 fprintf (file, "LDST list: \n");
4603 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
4605 fprintf (file, " Pattern (%3d): ", ptr->index);
4607 print_rtl (file, ptr->pattern);
4609 fprintf (file, "\n Loads : ");
4612 print_rtl (file, ptr->loads);
4614 fprintf (file, "(nil)");
4616 fprintf (file, "\n Stores : ");
4619 print_rtl (file, ptr->stores);
4621 fprintf (file, "(nil)");
4623 fprintf (file, "\n\n");
4626 fprintf (file, "\n");
/* Return the ldst list entry for X if X is in the list of ldst only
   expressions, or NULL otherwise. */
4631 static struct ls_expr *
4632 find_rtx_in_ldst (rtx x)
4636 if (!pre_ldst_table)
4639 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
4640 if (!slot || ((struct ls_expr *)*slot)->invalid)
4642 return (struct ls_expr *) *slot;
4645 /* Assign each element of the list of mems a monotonically increasing value. */
4648 enumerate_ldsts (void)
4650 struct ls_expr * ptr;
4653 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
4659 /* Return first item in the list. */
4661 static inline struct ls_expr *
4662 first_ls_expr (void)
4664 return pre_ldst_mems;
4667 /* Return the next item in the list after the specified one. */
4669 static inline struct ls_expr *
4670 next_ls_expr (struct ls_expr * ptr)
4675 /* Load Motion for loads which only kill themselves. */
/* Return true if x is a simple MEM operation, with no registers or
   side effects. These are the types of loads we consider for the
   ld_motion list; otherwise we let the usual aliasing take care of it. */
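/* As illustrative examples (a sketch, not an exhaustive rule): a load
   from a fixed symbol such as

     (mem:SI (symbol_ref:SI ("i")))

   can qualify, while a volatile access, a BLKmode access, or an access
   whose address mentions the stack pointer (an argument passed on the
   stack) is rejected by the checks below. */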
4682 simple_mem (const_rtx x)
4687 if (MEM_VOLATILE_P (x))
4690 if (GET_MODE (x) == BLKmode)
/* If we are handling exceptions, we must be careful with memory references
   that may trap. If we are not, the behavior is undefined, so we may just
   continue. */
4696 if (flag_non_call_exceptions && may_trap_p (x))
4699 if (side_effects_p (x))
4702 /* Do not consider function arguments passed on stack. */
4703 if (reg_mentioned_p (stack_pointer_rtx, x))
4706 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet. We have to be sure we know about ALL
   loads since the aliasing code will treat every entry in the
   ld_motion list as if it aliased nothing else. If we miss a load, we
   will get the wrong value since gcse might common it and we won't
   know to fix it up. */
4721 invalidate_any_buried_refs (rtx x)
4725 struct ls_expr * ptr;
4727 /* Invalidate it in the list. */
4728 if (MEM_P (x) && simple_mem (x))
4730 ptr = ldst_entry (x);
4734 /* Recursively process the insn. */
4735 fmt = GET_RTX_FORMAT (GET_CODE (x));
4737 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4740 invalidate_any_buried_refs (XEXP (x, i));
4741 else if (fmt[i] == 'E')
4742 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4743 invalidate_any_buried_refs (XVECEXP (x, i, j));
/* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
   here means MEM loads and stores to symbols, with no side effects
   and no registers in the expression. For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores. Any uses/defs
   which don't match these criteria are invalidated and trimmed out
   of the list. */
4756 compute_ld_motion_mems (void)
4758 struct ls_expr * ptr;
4762 pre_ldst_mems = NULL;
4763 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
4764 pre_ldst_expr_eq, NULL);
4768 FOR_BB_INSNS (bb, insn)
4772 if (GET_CODE (PATTERN (insn)) == SET)
4774 rtx src = SET_SRC (PATTERN (insn));
4775 rtx dest = SET_DEST (PATTERN (insn));
4777 /* Check for a simple LOAD... */
4778 if (MEM_P (src) && simple_mem (src))
4780 ptr = ldst_entry (src);
4782 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
4788 /* Make sure there isn't a buried load somewhere. */
4789 invalidate_any_buried_refs (src);
/* Check for stores. Don't worry about aliased ones; they
   will block any movement we might do later. We only care
   about this exact pattern since this is the only
   circumstance in which we will ignore the aliasing info. */
4796 if (MEM_P (dest) && simple_mem (dest))
4798 ptr = ldst_entry (dest);
if (! MEM_P (src)
    && GET_CODE (src) != ASM_OPERANDS
4802 /* Check for REG manually since want_to_gcse_p
4803 returns 0 for all REGs. */
4804 && can_assign_to_reg_p (src))
4805 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
4811 invalidate_any_buried_refs (PATTERN (insn));
/* Remove any references that have either been invalidated or are not in the
   expression list for pre gcse. */
4821 trim_ld_motion_mems (void)
4823 struct ls_expr * * last = & pre_ldst_mems;
4824 struct ls_expr * ptr = pre_ldst_mems;
4830 /* Delete if entry has been made invalid. */
4833 /* Delete if we cannot find this mem in the expression list. */
4834 unsigned int hash = ptr->hash_index % expr_hash_table.size;
4836 for (expr = expr_hash_table.table[hash];
4838 expr = expr->next_same_hash)
4839 if (expr_equiv_p (expr->expr, ptr->pattern))
4843 expr = (struct expr *) 0;
4847 /* Set the expression field if we are keeping it. */
4855 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
4856 free_ldst_entry (ptr);
4861 /* Show the world what we've found. */
4862 if (dump_file && pre_ldst_mems != NULL)
4863 print_ldst_list (dump_file);
4866 /* This routine will take an expression which we are replacing with
4867 a reaching register, and update any stores that are needed if
4868 that expression is in the ld_motion list. Stores are updated by
4869 copying their SRC to the reaching register, and then storing
   the reaching register into the store location. This keeps the
   correct value in the reaching register for the loads. */
4874 update_ld_motion_stores (struct expr * expr)
4876 struct ls_expr * mem_ptr;
4878 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
/* We can try to find just the REACHED stores, but it shouldn't
   matter to set the reaching reg everywhere... some might be
   dead and should be eliminated later. */
4884 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4885 where reg is the reaching reg used in the load. We checked in
4886 compute_ld_motion_mems that we can replace (set mem expr) with
4887 (set reg expr) in that insn. */
4888 rtx list = mem_ptr->stores;
4890 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4892 rtx insn = XEXP (list, 0);
4893 rtx pat = PATTERN (insn);
4894 rtx src = SET_SRC (pat);
4895 rtx reg = expr->reaching_reg;
4898 /* If we've already copied it, continue. */
4899 if (expr->reaching_reg == src)
4904 fprintf (dump_file, "PRE: store updated with reaching reg ");
4905 print_rtl (dump_file, expr->reaching_reg);
4906 fprintf (dump_file, ":\n ");
4907 print_inline_rtx (dump_file, insn, 8);
4908 fprintf (dump_file, "\n");
4911 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4912 new_rtx = emit_insn_before (copy, insn);
4913 SET_SRC (pat) = reg;
4914 df_insn_rescan (insn);
/* Un-recognize this pattern since it's probably different now. */
4917 INSN_CODE (insn) = -1;
4918 gcse_create_count++;
4923 /* Store motion code. */
4925 #define ANTIC_STORE_LIST(x) ((x)->loads)
4926 #define AVAIL_STORE_LIST(x) ((x)->stores)
4927 #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg)
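/* Note the field reuse behind these macros: for store motion, the
   ls_expr `loads' list holds the anticipatable stores, `stores' holds
   the available stores, and `reaching_reg' temporarily records the insn
   at which the last availability check failed (it is cleared again in
   compute_store_table). */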
4929 /* This is used to communicate the target bitvector we want to use in the
4930 reg_set_info routine when called via the note_stores mechanism. */
4931 static int * regvec;
4933 /* And current insn, for the same routine. */
4934 static rtx compute_store_table_current_insn;
4936 /* Used in computing the reverse edge graph bit vectors. */
4937 static sbitmap * st_antloc;
4939 /* Global holding the number of store expressions we are dealing with. */
4940 static int num_stores;
/* Check if we need to mark a register as set. Called from note_stores. */
4946 reg_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
4947 void *data ATTRIBUTE_UNUSED)
4949 if (GET_CODE (dest) == SUBREG)
4950 dest = SUBREG_REG (dest);
4953 regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
/* Clear any mark that says that this insn sets dest. Called from note_stores. */
4960 reg_clear_last_set (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
4963 int *dead_vec = (int *) data;
4965 if (GET_CODE (dest) == SUBREG)
4966 dest = SUBREG_REG (dest);
if (REG_P (dest)
    && dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn))
4970 dead_vec[REGNO (dest)] = 0;
/* Return zero if some of the registers in list X are killed
   because a register recorded in REGS_SET has been set. */
4977 store_ops_ok (const_rtx x, int *regs_set)
4981 for (; x; x = XEXP (x, 1))
reg = XEXP (x, 0);
if (regs_set[REGNO (reg)])
4991 /* Returns a list of registers mentioned in X. */
4993 extract_mentioned_regs (rtx x)
4995 return extract_mentioned_regs_helper (x, NULL_RTX);
/* Helper for extract_mentioned_regs; ACCUM is used to accumulate the used registers. */
5001 extract_mentioned_regs_helper (rtx x, rtx accum)
5007 /* Repeat is used to turn tail-recursion into iteration. */
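/* That is (a sketch of the pattern): instead of making the recursive
   call `accum = extract_mentioned_regs_helper (tem, accum);' for the
   final operand, the code assigns `x = tem;' and jumps back to the
   `repeat' label, so the C stack depth is bounded by the nesting depth
   of the rtx rather than by its total size. */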
5013 code = GET_CODE (x);
5017 return alloc_EXPR_LIST (0, x, accum);
5029 /* We do not run this function with arguments having side effects. */
5049 i = GET_RTX_LENGTH (code) - 1;
5050 fmt = GET_RTX_FORMAT (code);
5056 rtx tem = XEXP (x, i);
5058 /* If we are about to do the last recursive call
5059 needed at this level, change it into iteration. */
5066 accum = extract_mentioned_regs_helper (tem, accum);
5068 else if (fmt[i] == 'E')
5072 for (j = 0; j < XVECLEN (x, i); j++)
5073 accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
/* Determine whether INSN is a MEM store pattern that we will consider moving.
   REGS_SET_BEFORE is a bitmap of the registers set before (and including) the
   current insn, REGS_SET_AFTER is a bitmap of the registers set after (and
   including) the insn in this basic block. We must be passing through BB from
   head to end, as we are using this fact to speed things up.

   The results are stored this way:

   -- the first anticipatable expression is added into ANTIC_STORE_LIST
   -- if the processed expression is not anticipatable, NULL_RTX is added
      there instead, so that we can use it as an indicator that no further
      expression of this type may be anticipatable
   -- if the expression is available, it is added as head of AVAIL_STORE_LIST;
      consequently, all of them but this head are dead and may be deleted.
   -- if the expression is not available, the insn that causes it to fail to
      be available is stored in reaching_reg.

   Things are complicated a bit by the fact that there may already be stores
   to the same MEM from other blocks; also, the caller must take care of the
   necessary cleanup of the temporary markers after the end of the basic
   block. */
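/* A small worked example (illustrative only): if a basic block contains

     insn1:  MEM := x    (first store to MEM in the block)
     insn2:  MEM := y    (later store to the same MEM)

   then insn1 becomes the ANTIC_STORE_LIST entry (it is the anticipatable
   one), while insn2 ends up as the head of AVAIL_STORE_LIST: only the
   last store in the block is available on exit, and the earlier one is
   redundant and removable (see build_store_vectors). */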
5103 find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
5105 struct ls_expr * ptr;
5107 int check_anticipatable, check_available;
5108 basic_block bb = BLOCK_FOR_INSN (insn);
5110 set = single_set (insn);
5114 dest = SET_DEST (set);
5116 if (! MEM_P (dest) || MEM_VOLATILE_P (dest)
5117 || GET_MODE (dest) == BLKmode)
5120 if (side_effects_p (dest))
/* If we are handling exceptions, we must be careful with memory references
   that may trap. If we are not, the behavior is undefined, so we may just
   continue. */
5126 if (flag_non_call_exceptions && may_trap_p (dest))
5129 /* Even if the destination cannot trap, the source may. In this case we'd
5130 need to handle updating the REG_EH_REGION note. */
5131 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
/* Make sure that the SET_SRC of this store insn can be assigned to
   a register, or we will fail later on in replace_store_insn, which
   assumes that we can do this. But sometimes the target machine has
   oddities like MEM read-modify-write instructions. See for example
5139 if (!can_assign_to_reg_p (SET_SRC (set)))
5142 ptr = ldst_entry (dest);
5143 if (!ptr->pattern_regs)
5144 ptr->pattern_regs = extract_mentioned_regs (dest);
5146 /* Do not check for anticipatability if we either found one anticipatable
5147 store already, or tested for one and found out that it was killed. */
5148 check_anticipatable = 0;
5149 if (!ANTIC_STORE_LIST (ptr))
5150 check_anticipatable = 1;
tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
if (tmp != NULL_RTX
    && BLOCK_FOR_INSN (tmp) != bb)
  check_anticipatable = 1;
5158 if (check_anticipatable)
if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
  tmp = NULL_RTX;
else
  tmp = insn;
ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
                                          ANTIC_STORE_LIST (ptr));
/* It is not necessary to check whether the store is available if we did
   it successfully before; if we failed before, do not bother to check
   until we reach the insn that caused us to fail. */
5171 check_available = 0;
5172 if (!AVAIL_STORE_LIST (ptr))
5173 check_available = 1;
5176 tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
5177 if (BLOCK_FOR_INSN (tmp) != bb)
5178 check_available = 1;
5180 if (check_available)
/* Check whether we have already reached the insn at which the check
   failed last time. */
5184 if (LAST_AVAIL_CHECK_FAILURE (ptr))
5186 for (tmp = BB_END (bb);
5187 tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
5188 tmp = PREV_INSN (tmp))
5191 check_available = 0;
5194 check_available = store_killed_after (dest, ptr->pattern_regs, insn,
5196 &LAST_AVAIL_CHECK_FAILURE (ptr));
5198 if (!check_available)
5199 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
5202 /* Find available and anticipatable stores. */
5205 compute_store_table (void)
5211 int *last_set_in, *already_set;
5212 struct ls_expr * ptr, **prev_next_ptr_ptr;
5213 unsigned int max_gcse_regno = max_reg_num ();
5216 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5217 pre_ldst_expr_eq, NULL);
5218 last_set_in = XCNEWVEC (int, max_gcse_regno);
5219 already_set = XNEWVEC (int, max_gcse_regno);
5221 /* Find all the stores we care about. */
5224 /* First compute the registers set in this block. */
5225 regvec = last_set_in;
5227 FOR_BB_INSNS (bb, insn)
5229 if (! INSN_P (insn))
5234 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5235 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5236 last_set_in[regno] = INSN_UID (insn);
5239 pat = PATTERN (insn);
5240 compute_store_table_current_insn = insn;
5241 note_stores (pat, reg_set_info, NULL);
5244 /* Now find the stores. */
5245 memset (already_set, 0, sizeof (int) * max_gcse_regno);
5246 regvec = already_set;
5247 FOR_BB_INSNS (bb, insn)
5249 if (! INSN_P (insn))
5254 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5255 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5256 already_set[regno] = 1;
5259 pat = PATTERN (insn);
5260 note_stores (pat, reg_set_info, NULL);
5262 /* Now that we've marked regs, look for stores. */
5263 find_moveable_store (insn, already_set, last_set_in);
5265 /* Unmark regs that are no longer set. */
5266 compute_store_table_current_insn = insn;
5267 note_stores (pat, reg_clear_last_set, last_set_in);
5270 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5271 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
5272 && last_set_in[regno] == INSN_UID (insn))
5273 last_set_in[regno] = 0;
5277 #ifdef ENABLE_CHECKING
5278 /* last_set_in should now be all-zero. */
5279 for (regno = 0; regno < max_gcse_regno; regno++)
5280 gcc_assert (!last_set_in[regno]);
5283 /* Clear temporary marks. */
5284 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5286 LAST_AVAIL_CHECK_FAILURE(ptr) = NULL_RTX;
5287 if (ANTIC_STORE_LIST (ptr)
5288 && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
5289 ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
5293 /* Remove the stores that are not available anywhere, as there will
5294 be no opportunity to optimize them. */
5295 for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
5297 ptr = *prev_next_ptr_ptr)
5299 if (!AVAIL_STORE_LIST (ptr))
5301 *prev_next_ptr_ptr = ptr->next;
5302 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5303 free_ldst_entry (ptr);
5306 prev_next_ptr_ptr = &ptr->next;
5309 ret = enumerate_ldsts ();
5313 fprintf (dump_file, "ST_avail and ST_antic (shown under loads..)\n");
5314 print_ldst_list (dump_file);
/* Check to see if the load X is aliased with STORE_PATTERN.
   AFTER is true if we are checking the case when STORE_PATTERN occurs
   after the load X. */
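/* Intuition (a sketch): if the store will end up after the load, the
   conflict is write-after-read, which anti_dependence tests; if the
   store would land before the load, the load might read the stored
   value, which is the read-after-write case true_dependence tests. */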
5327 load_kills_store (const_rtx x, const_rtx store_pattern, int after)
5330 return anti_dependence (x, store_pattern);
5332 return true_dependence (store_pattern, GET_MODE (store_pattern), x,
5336 /* Go through the entire insn X, looking for any loads which might alias
5337 STORE_PATTERN. Return true if found.
5338 AFTER is true if we are checking the case when STORE_PATTERN occurs
5339 after the insn X. */
5342 find_loads (const_rtx x, const_rtx store_pattern, int after)
5351 if (GET_CODE (x) == SET)
5356 if (load_kills_store (x, store_pattern, after))
5360 /* Recursively process the insn. */
5361 fmt = GET_RTX_FORMAT (GET_CODE (x));
5363 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
5366 ret |= find_loads (XEXP (x, i), store_pattern, after);
5367 else if (fmt[i] == 'E')
5368 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5369 ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
5375 store_killed_in_pat (const_rtx x, const_rtx pat, int after)
5377 if (GET_CODE (pat) == SET)
5379 rtx dest = SET_DEST (pat);
5381 if (GET_CODE (dest) == ZERO_EXTRACT)
5382 dest = XEXP (dest, 0);
/* Check for memory stores to aliased objects. */
if (MEM_P (dest)
    && !expr_equiv_p (dest, x))
5390 if (output_dependence (dest, x))
5395 if (output_dependence (x, dest))
5401 if (find_loads (pat, x, after))
5407 /* Check if INSN kills the store pattern X (is aliased with it).
5408 AFTER is true if we are checking the case when store X occurs
5409 after the insn. Return true if it does. */
5412 store_killed_in_insn (const_rtx x, const_rtx x_regs, const_rtx insn, int after)
5414 const_rtx reg, base, note, pat;
/* A normal or pure call might read from the pattern,
   but a const call will not. */
5423 if (!RTL_CONST_CALL_P (insn))
/* But even a const call reads its parameters. Check whether the
   base of some of the registers used in the mem is the stack pointer. */
5428 for (reg = x_regs; reg; reg = XEXP (reg, 1))
5430 base = find_base_term (XEXP (reg, 0));
5432 || (GET_CODE (base) == ADDRESS
5433 && GET_MODE (base) == Pmode
5434 && XEXP (base, 0) == stack_pointer_rtx))
5441 pat = PATTERN (insn);
5442 if (GET_CODE (pat) == SET)
5444 if (store_killed_in_pat (x, pat, after))
5447 else if (GET_CODE (pat) == PARALLEL)
5451 for (i = 0; i < XVECLEN (pat, 0); i++)
5452 if (store_killed_in_pat (x, XVECEXP (pat, 0, i), after))
5455 else if (find_loads (PATTERN (insn), x, after))
5458 /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
5459 location aliased with X, then this insn kills X. */
5460 note = find_reg_equal_equiv_note (insn);
5463 note = XEXP (note, 0);
5465 /* However, if the note represents a must alias rather than a may
5466 alias relationship, then it does not kill X. */
5467 if (expr_equiv_p (note, x))
5470 /* See if there are any aliased loads in the note. */
5471 return find_loads (note, x, after);
/* Returns true if the expression X is loaded or clobbered on or after INSN
   within basic block BB. REGS_SET_AFTER is a bitmap of the registers set in
   or after the insn. X_REGS is the list of registers mentioned in X. If the
   store is killed, return in FAIL_INSN the last insn in which the kill occurs. */
5480 store_killed_after (const_rtx x, const_rtx x_regs, const_rtx insn, const_basic_block bb,
5481 int *regs_set_after, rtx *fail_insn)
5483 rtx last = BB_END (bb), act;
5485 if (!store_ops_ok (x_regs, regs_set_after))
5487 /* We do not know where it will happen. */
5489 *fail_insn = NULL_RTX;
5493 /* Scan from the end, so that fail_insn is determined correctly. */
5494 for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
5495 if (store_killed_in_insn (x, x_regs, act, false))
5505 /* Returns true if the expression X is loaded or clobbered on or before INSN
   within basic block BB. X_REGS is the list of registers mentioned in X.
   REGS_SET_BEFORE is a bitmap of the registers set before or in this insn. */
5509 store_killed_before (const_rtx x, const_rtx x_regs, const_rtx insn, const_basic_block bb,
5510 int *regs_set_before)
5512 rtx first = BB_HEAD (bb);
5514 if (!store_ops_ok (x_regs, regs_set_before))
5517 for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
5518 if (store_killed_in_insn (x, x_regs, insn, true))
5524 /* Fill in available, anticipatable, transparent and kill vectors in
5525 STORE_DATA, based on lists of available and anticipatable stores. */
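/* As a sketch of the roles in the reverse LCM problem solved below:

     ae_gen[BB]     -- the store is available on exit from BB
     st_antloc[BB]  -- the store is anticipatable at the head of BB
     ae_kill[BB]    -- something in BB kills the store
     transp[BB]     -- BB is transparent: it neither computes nor kills it

   pre_edge_rev_lcm later turns these into pre_insert_map (edges to
   insert the store on) and pre_delete_map (blocks to delete it from). */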
5527 build_store_vectors (void)
5530 int *regs_set_in_block;
5532 struct ls_expr * ptr;
5533 unsigned int max_gcse_regno = max_reg_num ();
5535 /* Build the gen_vector. This is any store in the table which is not killed
5536 by aliasing later in its block. */
5537 ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
5538 sbitmap_vector_zero (ae_gen, last_basic_block);
5540 st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
5541 sbitmap_vector_zero (st_antloc, last_basic_block);
5543 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5545 for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
5547 insn = XEXP (st, 0);
5548 bb = BLOCK_FOR_INSN (insn);
/* If we've already seen an available expression in this block,
   we can delete this one (it occurs earlier in the block). We'll
   copy the SRC expression to an unused register in case there
   are any side effects. */
5554 if (TEST_BIT (ae_gen[bb->index], ptr->index))
5556 rtx r = gen_reg_rtx_and_attrs (ptr->pattern);
5558 fprintf (dump_file, "Removing redundant store:\n");
5559 replace_store_insn (r, XEXP (st, 0), bb, ptr);
5562 SET_BIT (ae_gen[bb->index], ptr->index);
5565 for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
5567 insn = XEXP (st, 0);
5568 bb = BLOCK_FOR_INSN (insn);
5569 SET_BIT (st_antloc[bb->index], ptr->index);
5573 ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
5574 sbitmap_vector_zero (ae_kill, last_basic_block);
5576 transp = sbitmap_vector_alloc (last_basic_block, num_stores);
5577 sbitmap_vector_zero (transp, last_basic_block);
5578 regs_set_in_block = XNEWVEC (int, max_gcse_regno);
5582 FOR_BB_INSNS (bb, insn)
5586 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
5588 unsigned int ref_regno = DF_REF_REGNO (*def_rec);
5589 if (ref_regno < max_gcse_regno)
regs_set_in_block[ref_regno] = 1;
5594 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5596 if (store_killed_after (ptr->pattern, ptr->pattern_regs, BB_HEAD (bb),
5597 bb, regs_set_in_block, NULL))
5599 /* It should not be necessary to consider the expression
5600 killed if it is both anticipatable and available. */
5601 if (!TEST_BIT (st_antloc[bb->index], ptr->index)
5602 || !TEST_BIT (ae_gen[bb->index], ptr->index))
5603 SET_BIT (ae_kill[bb->index], ptr->index);
5606 SET_BIT (transp[bb->index], ptr->index);
5610 free (regs_set_in_block);
5614 dump_sbitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
5615 dump_sbitmap_vector (dump_file, "st_kill", "", ae_kill, last_basic_block);
5616 dump_sbitmap_vector (dump_file, "Transpt", "", transp, last_basic_block);
5617 dump_sbitmap_vector (dump_file, "st_avloc", "", ae_gen, last_basic_block);
5621 /* Insert an instruction at the beginning of a basic block, and update
5622 the BB_HEAD if needed. */
5625 insert_insn_start_basic_block (rtx insn, basic_block bb)
5627 /* Insert at start of successor block. */
5628 rtx prev = PREV_INSN (BB_HEAD (bb));
5629 rtx before = BB_HEAD (bb);
5632 if (! LABEL_P (before)
5633 && !NOTE_INSN_BASIC_BLOCK_P (before))
5636 if (prev == BB_END (bb))
5638 before = NEXT_INSN (before);
5641 insn = emit_insn_after_noloc (insn, prev, bb);
5645 fprintf (dump_file, "STORE_MOTION insert store at start of BB %d:\n",
5647 print_inline_rtx (dump_file, insn, 6);
5648 fprintf (dump_file, "\n");
5652 /* This routine will insert a store on an edge. EXPR is the ldst entry for
5653 the memory reference, and E is the edge to insert it on. Returns nonzero
5654 if an edge insertion was performed. */
5657 insert_store (struct ls_expr * expr, edge e)
/* We did all the deletes before this insert, so if we didn't delete a
   store, then we haven't set the reaching reg yet either. */
5666 if (expr->reaching_reg == NULL_RTX)
5669 if (e->flags & EDGE_FAKE)
5672 reg = expr->reaching_reg;
5673 insn = gen_move_insn (copy_rtx (expr->pattern), reg);
5675 /* If we are inserting this expression on ALL predecessor edges of a BB,
5676 insert it at the start of the BB, and reset the insert bits on the other
5677 edges so we don't try to insert it on the other edges. */
5679 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
5680 if (!(tmp->flags & EDGE_FAKE))
5682 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
5684 gcc_assert (index != EDGE_INDEX_NO_EDGE);
5685 if (! TEST_BIT (pre_insert_map[index], expr->index))
5689 /* If tmp is NULL, we found an insertion on every edge, blank the
5690 insertion vector for these edges, and insert at the start of the BB. */
5691 if (!tmp && bb != EXIT_BLOCK_PTR)
5693 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
5695 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
5696 RESET_BIT (pre_insert_map[index], expr->index);
5698 insert_insn_start_basic_block (insn, bb);
/* We can't put stores in the front of blocks pointed to by abnormal
   edges since that may put a store where one didn't use to be. */
5704 gcc_assert (!(e->flags & EDGE_ABNORMAL));
5706 insert_insn_on_edge (insn, e);
5710 fprintf (dump_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
5711 e->src->index, e->dest->index);
5712 print_inline_rtx (dump_file, insn, 6);
5713 fprintf (dump_file, "\n");
5719 /* Remove any REG_EQUAL or REG_EQUIV notes containing a reference to the
5720 memory location in SMEXPR set in basic block BB.
5722 This could be rather expensive. */
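/* The walk below is an iterative depth-first search over the successor
   edges, using an explicit stack of edge iterators plus the `visited'
   bitmap. In blocks where the store is anticipatable, the scan of insns
   stops at the store itself, since notes beyond it are justified by
   that store rather than by the definition being inserted. */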
5725 remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
5727 edge_iterator *stack, ei;
5730 sbitmap visited = sbitmap_alloc (last_basic_block);
5731 rtx last, insn, note;
5732 rtx mem = smexpr->pattern;
5734 stack = XNEWVEC (edge_iterator, n_basic_blocks);
5736 ei = ei_start (bb->succs);
5738 sbitmap_zero (visited);
5740 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
5748 sbitmap_free (visited);
5751 act = ei_edge (stack[--sp]);
5755 if (bb == EXIT_BLOCK_PTR
5756 || TEST_BIT (visited, bb->index))
5760 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
5763 SET_BIT (visited, bb->index);
5765 if (TEST_BIT (st_antloc[bb->index], smexpr->index))
5767 for (last = ANTIC_STORE_LIST (smexpr);
5768 BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
5769 last = XEXP (last, 1))
5771 last = XEXP (last, 0);
5774 last = NEXT_INSN (BB_END (bb));
5776 for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
5779 note = find_reg_equal_equiv_note (insn);
5780 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
5784 fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
5786 remove_note (insn, note);
5791 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
5793 if (EDGE_COUNT (bb->succs) > 0)
5797 ei = ei_start (bb->succs);
5798 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
5803 /* This routine will replace a store with a SET to a specified register. */
5806 replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
5808 rtx insn, mem, note, set, ptr;
5810 mem = smexpr->pattern;
5811 insn = gen_move_insn (reg, SET_SRC (single_set (del)));
5813 for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
5814 if (XEXP (ptr, 0) == del)
5816 XEXP (ptr, 0) = insn;
5820 /* Move the notes from the deleted insn to its replacement. */
5821 REG_NOTES (insn) = REG_NOTES (del);
5823 /* Emit the insn AFTER all the notes are transferred.
5824 This is cheaper since we avoid df rescanning for the note change. */
5825 insn = emit_insn_after (insn, del);
5830 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
5831 print_inline_rtx (dump_file, del, 6);
5832 fprintf (dump_file, "\nSTORE_MOTION replaced with insn:\n ");
5833 print_inline_rtx (dump_file, insn, 6);
5834 fprintf (dump_file, "\n");
/* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
   they are no longer accurate once they are reached by this
   definition, so drop them. */
5842 for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn))
5845 set = single_set (insn);
5848 if (expr_equiv_p (SET_DEST (set), mem))
5850 note = find_reg_equal_equiv_note (insn);
5851 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
5855 fprintf (dump_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
5857 remove_note (insn, note);
5859 remove_reachable_equiv_notes (bb, smexpr);
5863 /* Delete a store, but copy the value that would have been stored into
5864 the reaching_reg for later storing. */
5867 delete_store (struct ls_expr * expr, basic_block bb)
5871 if (expr->reaching_reg == NULL_RTX)
5872 expr->reaching_reg = gen_reg_rtx_and_attrs (expr->pattern);
5874 reg = expr->reaching_reg;
5876 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
5879 if (BLOCK_FOR_INSN (del) == bb)
5881 /* We know there is only one since we deleted redundant
5882 ones during the available computation. */
5883 replace_store_insn (reg, del, bb, expr);
5889 /* Free memory used by store motion. */
5892 free_store_memory (void)
5897 sbitmap_vector_free (ae_gen);
5899 sbitmap_vector_free (ae_kill);
5901 sbitmap_vector_free (transp);
5903 sbitmap_vector_free (st_antloc);
5905 sbitmap_vector_free (pre_insert_map);
5907 sbitmap_vector_free (pre_delete_map);
5909 ae_gen = ae_kill = transp = st_antloc = NULL;
5910 pre_insert_map = pre_delete_map = NULL;
/* Perform store motion. Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.
   Return nonzero if transformations are performed by the pass. */
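/* A sketch of the pipeline below: compute_store_table collects the
   candidate stores, build_store_vectors derives the dataflow vectors,
   pre_edge_rev_lcm solves the reverse LCM problem, and then stores are
   deleted from the blocks in pre_delete_map and re-inserted on the
   edges in pre_insert_map. */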
5918 one_store_motion_pass (void)
5922 struct ls_expr * ptr;
5923 int update_flow = 0;
5925 gcse_subst_count = 0;
5926 gcse_create_count = 0;
5928 init_alias_analysis ();
5930 /* Find all the available and anticipatable stores. */
5931 num_stores = compute_store_table ();
5932 if (num_stores == 0)
5934 htab_delete (pre_ldst_table);
5935 pre_ldst_table = NULL;
5936 end_alias_analysis ();
5940 /* Now compute kill & transp vectors. */
5941 build_store_vectors ();
5942 add_noreturn_fake_exit_edges ();
5943 connect_infinite_loops_to_exit ();
5945 edge_list = pre_edge_rev_lcm (num_stores, transp, ae_gen,
5946 st_antloc, ae_kill, &pre_insert_map,
5949 /* Now we want to insert the new stores which are going to be needed. */
5950 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
/* If any of the edges we have above are abnormal, we can't move this
   store. */
5954 for (x = NUM_EDGES (edge_list) - 1; x >= 0; x--)
5955 if (TEST_BIT (pre_insert_map[x], ptr->index)
5956 && (INDEX_EDGE (edge_list, x)->flags & EDGE_ABNORMAL))
5961 if (dump_file != NULL)
5963 "Can't replace store %d: abnormal edge from %d to %d\n",
5964 ptr->index, INDEX_EDGE (edge_list, x)->src->index,
5965 INDEX_EDGE (edge_list, x)->dest->index);
/* Delete the stores this expression makes redundant, then insert
   the replacements on the edges computed by LCM. */
5972 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
5974 delete_store (ptr, bb);
5978 for (x = 0; x < NUM_EDGES (edge_list); x++)
5979 if (TEST_BIT (pre_insert_map[x], ptr->index))
5981 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
5982 gcse_create_count++;
5987 commit_edge_insertions ();
5989 free_store_memory ();
5990 free_edge_list (edge_list);
5991 remove_fake_exit_edges ();
5992 end_alias_analysis ();
5996 fprintf (dump_file, "STORE_MOTION of %s, %d basic blocks, ",
5997 current_function_name (), n_basic_blocks);
5998 fprintf (dump_file, "%d substs, %d insns created\n",
5999 gcse_subst_count, gcse_create_count);
6002 return (gcse_subst_count > 0 || gcse_create_count > 0);
6006 /* Return true if the graph is too expensive to optimize. PASS is the
6007 optimization about to be performed. */
6010 is_too_expensive (const char *pass)
6012 /* Trying to perform global optimizations on flow graphs which have
6013 a high connectivity will take a long time and is unlikely to be
6014 particularly useful.
   In normal circumstances a cfg should have about twice as many
   edges as blocks. But we do not want to punish small functions
   which have a couple of switch statements. Rather than simply
   thresholding the number of blocks, use something with a more
   graceful degradation. */
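/* Worked example of the threshold below: a function with 1000 basic
   blocks is rejected only once it has more than 20000 + 1000 * 4 =
   24000 edges, i.e. an average of 24 edges per block, far beyond the
   roughly two per block expected of a normal cfg. */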
6021 if (n_edges > 20000 + n_basic_blocks * 4)
6023 warning (OPT_Wdisabled_optimization,
6024 "%s: %d basic blocks and %d edges/basic block",
6025 pass, n_basic_blocks, n_edges / n_basic_blocks);
6030 /* If allocating memory for the cprop bitmap would take up too much
6031 storage it's better just to disable the optimization. */
if ((n_basic_blocks
     * SBITMAP_SET_SIZE (max_reg_num ())
6034 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
6036 warning (OPT_Wdisabled_optimization,
6037 "%s: %d basic blocks and %d registers",
6038 pass, n_basic_blocks, max_reg_num ());
6047 /* Main function for the CPROP pass. */
6050 one_cprop_pass (void)
6054 /* Return if there's nothing to do, or it is too expensive. */
6055 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_("const/copy propagation disabled")))
6059 global_const_prop_count = local_const_prop_count = 0;
6060 global_copy_prop_count = local_copy_prop_count = 0;
6063 gcc_obstack_init (&gcse_obstack);
6066 /* Do a local const/copy propagation pass first. The global pass
6067 only handles global opportunities.
6068 If the local pass changes something, remove any unreachable blocks
6069 because the CPROP global dataflow analysis may get into infinite
6070 loops for CFGs with unreachable blocks.
6072 FIXME: This local pass should not be necessary after CSE (but for
6073 some reason it still is). It is also (proven) not necessary
to run the local pass right after FWPROP.
6076 FIXME: The global analysis would not get into infinite loops if it
6077 would use the DF solver (via df_simple_dataflow) instead of
6078 the solver implemented in this file. */
6079 if (local_cprop_pass ())
6081 delete_unreachable_blocks ();
6085 /* Determine implicit sets. */
6086 implicit_sets = XCNEWVEC (rtx, last_basic_block);
6087 find_implicit_sets ();
6089 alloc_hash_table (get_max_uid (), &set_hash_table, 1);
6090 compute_hash_table (&set_hash_table);
6092 /* Free implicit_sets before peak usage. */
6093 free (implicit_sets);
6094 implicit_sets = NULL;
6097 dump_hash_table (dump_file, "SET", &set_hash_table);
6098 if (set_hash_table.n_elems > 0)
6103 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
6104 compute_cprop_data ();
6106 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
6108 /* Reset tables used to keep track of what's still valid [since
6109 the start of the block]. */
6110 reset_opr_set_tables ();
6112 FOR_BB_INSNS (bb, insn)
6115 changed |= cprop_insn (insn);
6117 /* Keep track of everything modified by this insn. */
6118 /* ??? Need to be careful w.r.t. mods done to INSN.
6119 Don't call mark_oprs_set if we turned the
6120 insn into a NOTE. */
6121 if (! NOTE_P (insn))
6122 mark_oprs_set (insn);
6126 changed |= bypass_conditional_jumps ();
6130 free_hash_table (&set_hash_table);
6132 obstack_free (&gcse_obstack, NULL);
6136 fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
6137 current_function_name (), n_basic_blocks, bytes_used);
6138 fprintf (dump_file, "%d local const props, %d local copy props, ",
6139 local_const_prop_count, local_copy_prop_count);
6140 fprintf (dump_file, "%d global const props, %d global copy props\n\n",
6141 global_const_prop_count, global_copy_prop_count);
6148 /* All the passes implemented in this file. Each pass has its
6149 own gate and execute function, and at the end of the file a
6150 pass definition for passes.c.
6152 We do not construct an accurate cfg in functions which call
setjmp, so none of these passes runs if the function calls setjmp.
6155 FIXME: Should just handle setjmp via REG_SETJMP notes. */
6158 gate_rtl_cprop (void)
6160 return optimize > 0 && flag_gcse
6161 && !cfun->calls_setjmp
6166 execute_rtl_cprop (void)
6168 delete_unreachable_blocks ();
6169 df_note_add_problem ();
6170 df_set_flags (DF_LR_RUN_DCE);
6172 flag_rerun_cse_after_global_opts |= one_cprop_pass ();
6179 return optimize > 0 && flag_gcse
6180 && !cfun->calls_setjmp
6181 && optimize_function_for_speed_p (cfun)
6186 execute_rtl_pre (void)
6188 delete_unreachable_blocks ();
6189 df_note_add_problem ();
6191 flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
6196 gate_rtl_hoist (void)
6198 return optimize > 0 && flag_gcse
6199 && !cfun->calls_setjmp
6200 /* It does not make sense to run code hoisting unless we are optimizing
for code size -- it rarely makes programs faster, and can make them
6202 bigger if we did PRE (when optimizing for space, we don't run PRE). */
6203 && optimize_function_for_size_p (cfun)
6208 execute_rtl_hoist (void)
6210 delete_unreachable_blocks ();
6211 df_note_add_problem ();
6213 flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
6218 gate_rtl_store_motion (void)
6220 return optimize > 0 && flag_gcse_sm
6221 && !cfun->calls_setjmp
6222 && optimize_function_for_speed_p (cfun)
6223 && dbg_cnt (store_motion);
6227 execute_rtl_store_motion (void)
6229 delete_unreachable_blocks ();
6230 df_note_add_problem ();
6232 flag_rerun_cse_after_global_opts |= one_store_motion_pass ();
6236 struct rtl_opt_pass pass_rtl_cprop =
6241 gate_rtl_cprop, /* gate */
6242 execute_rtl_cprop, /* execute */
6245 0, /* static_pass_number */
6246 TV_CPROP, /* tv_id */
6247 PROP_cfglayout, /* properties_required */
6248 0, /* properties_provided */
6249 0, /* properties_destroyed */
6250 0, /* todo_flags_start */
6251 TODO_df_finish | TODO_verify_rtl_sharing |
6253 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
6257 struct rtl_opt_pass pass_rtl_pre =
6262 gate_rtl_pre, /* gate */
6263 execute_rtl_pre, /* execute */
6266 0, /* static_pass_number */
6268 PROP_cfglayout, /* properties_required */
6269 0, /* properties_provided */
6270 0, /* properties_destroyed */
6271 0, /* todo_flags_start */
6272 TODO_df_finish | TODO_verify_rtl_sharing |
6274 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
6278 struct rtl_opt_pass pass_rtl_hoist =
6283 gate_rtl_hoist, /* gate */
6284 execute_rtl_hoist, /* execute */
6287 0, /* static_pass_number */
6288 TV_HOIST, /* tv_id */
6289 PROP_cfglayout, /* properties_required */
6290 0, /* properties_provided */
6291 0, /* properties_destroyed */
6292 0, /* todo_flags_start */
6293 TODO_df_finish | TODO_verify_rtl_sharing |
6295 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
6299 struct rtl_opt_pass pass_rtl_store_motion =
6303 "store_motion", /* name */
6304 gate_rtl_store_motion, /* gate */
6305 execute_rtl_store_motion, /* execute */
6308 0, /* static_pass_number */
6310 PROP_cfglayout, /* properties_required */
6311 0, /* properties_provided */
6312 0, /* properties_destroyed */
6313 0, /* todo_flags_start */
6314 TODO_df_finish | TODO_verify_rtl_sharing |
6316 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
6320 #include "gt-gcse.h"