1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 - reordering of memory allocation and freeing to be more space efficient
24 - do rough calc of how many regs are needed in each block, and a rough
25 calc of how many regs are available in each class and use that to
26 throttle back the code in cases where RTX_COST is minimal.
27 - a store to the same address as a load does not kill the load if the
28 source of the store is also the destination of the load. Handling this
29 allows more load motion, particularly out of loops.
33 /* References searched while implementing this.
35 Compilers Principles, Techniques and Tools
39 Global Optimization by Suppression of Partial Redundancies
41 communications of the acm, Vol. 22, Num. 2, Feb. 1979
43 A Portable Machine-Independent Global Optimizer - Design and Measurements
45 Stanford Ph.D. thesis, Dec. 1983
47 A Fast Algorithm for Code Movement Optimization
49 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
51 A Solution to a Problem with Morel and Renvoise's
52 Global Optimization by Suppression of Partial Redundancies
53 K-H Drechsler, M.P. Stadel
54 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
56 Practical Adaptation of the Global Optimization
57 Algorithm of Morel and Renvoise
59 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
61 Efficiently Computing Static Single Assignment Form and the Control
63 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
64 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
67 J. Knoop, O. Ruthing, B. Steffen
68 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
70 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
71 Time for Reducible Flow Control
73 ACM Letters on Programming Languages and Systems,
74 Vol. 2, Num. 1-4, Mar-Dec 1993
76 An Efficient Representation for Sparse Sets
77 Preston Briggs, Linda Torczon
78 ACM Letters on Programming Languages and Systems,
79 Vol. 2, Num. 1-4, Mar-Dec 1993
81 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
82 K-H Drechsler, M.P. Stadel
83 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
85 Partial Dead Code Elimination
86 J. Knoop, O. Ruthing, B. Steffen
87 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89 Effective Partial Redundancy Elimination
90 P. Briggs, K.D. Cooper
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
93 The Program Structure Tree: Computing Control Regions in Linear Time
94 R. Johnson, D. Pearson, K. Pingali
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
97 Optimal Code Motion: Theory and Practice
98 J. Knoop, O. Ruthing, B. Steffen
99 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
101 The power of assignment motion
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
105 Global code motion / global value numbering
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
109 Value Driven Redundancy Elimination
111 Rice University Ph.D. thesis, Apr. 1996
115 Massively Scalar Compiler Project, Rice University, Sep. 1996
117 High Performance Compilers for Parallel Computing
121 Advanced Compiler Design and Implementation
123 Morgan Kaufmann, 1997
125 Building an Optimizing Compiler
129 People wishing to speed up the code here should read:
130 Elimination Algorithms for Data Flow Analysis
131 B.G. Ryder, M.C. Paull
132 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
134 How to Analyze Large Programs Efficiently and Informatively
135 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
136 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
138 People wishing to do something different can find various possibilities
139 in the above papers and elsewhere.
144 #include "coretypes.h"
146 #include "diagnostic-core.h"
153 #include "hard-reg-set.h"
155 #include "insn-config.h"
157 #include "basic-block.h"
159 #include "function.h"
168 #include "tree-pass.h"
175 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
176 are a superset of those done by classic GCSE.
178 We perform the following steps:
180 1) Compute table of places where registers are set.
182 2) Perform copy/constant propagation.
184 3) Perform global cse using lazy code motion if not optimizing
185 for size, or code hoisting if we are.
187 4) Perform another pass of copy/constant propagation. Try to bypass
188 conditional jumps if the condition can be computed from a value of
191 Two passes of copy/constant propagation are done because the first one
192 enables more GCSE and the second one helps to clean up the copies that
193 GCSE creates. This is needed more for PRE than for Classic because Classic
194 GCSE will try to use an existing register containing the common
195 subexpression rather than create a new one. This is harder to do for PRE
196 because of the code motion (which Classic GCSE doesn't do).
198 Expressions we are interested in GCSE-ing are of the form
199 (set (pseudo-reg) (expression)).
200 Function want_to_gcse_p says what these are.
202 In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
203 This allows PRE to hoist expressions that are expressed in multiple insns,
204 such as complex address calculations (e.g. for PIC code, or loads with a
205 high part and a low part).
207 PRE handles moving invariant expressions out of loops (by treating them as
208 partially redundant).
210 **********************
212 We used to support multiple passes but there are diminishing returns in
213 doing so. The first pass usually makes 90% of the changes that are doable.
214 A second pass can make a few more changes made possible by the first pass.
215 Experiments show any further passes don't make enough changes to justify
218 A study of spec92 using an unlimited number of passes:
219 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
220 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
221 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
223 It was found doing copy propagation between each pass enables further
226 This study was done before expressions in REG_EQUAL notes were added as
227 candidate expressions for optimization, and before the GIMPLE optimizers
228 were added. Probably, multiple passes is even less efficient now than
229 at the time when the study was conducted.
231 PRE is quite expensive in complicated functions because the DFA can take
232 a while to converge. Hence we only perform one pass.
234 **********************
236 The steps for PRE are:
238 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
240 2) Perform the data flow analysis for PRE.
242 3) Delete the redundant instructions
244 4) Insert the required copies [if any] that make the partially
245 redundant instructions fully redundant.
247 5) For other reaching expressions, insert an instruction to copy the value
248 to a newly created pseudo that will reach the redundant instruction.
250 The deletion is done first so that when we do insertions we
251 know which pseudo reg to use.
253 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
254 argue it is not. The number of iterations for the algorithm to converge
255 is typically 2-4 so I don't view it as that expensive (relatively speaking).
257 PRE GCSE depends heavily on the second CPROP pass to clean up the copies
258 we create. To make an expression reach the place where it's redundant,
259 the result of the expression is copied to a new register, and the redundant
260 expression is deleted by replacing it with this new register. Classic GCSE
261 doesn't have this problem as much as it computes the reaching defs of
262 each register in each block and thus can try to use an existing
265 /* GCSE global vars. */
267 struct target_gcse default_target_gcse;
268 #if SWITCHABLE_TARGET
269 struct target_gcse *this_target_gcse = &default_target_gcse;
272 /* Set to non-zero if CSE should run after all GCSE optimizations are done. */
273 int flag_rerun_cse_after_global_opts;
275 /* An obstack for our working variables. */
276 static struct obstack gcse_obstack;
278 struct reg_use {rtx reg_rtx; };
280 /* Hash table of expressions. */
284 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
286 /* Index in the available expression bitmaps. */
288 /* Next entry with the same hash. */
289 struct expr *next_same_hash;
290 /* List of anticipatable occurrences in basic blocks in the function.
291 An "anticipatable occurrence" is one that is the first occurrence in the
292 basic block, the operands are not modified in the basic block prior
293 to the occurrence and the output is not used between the start of
294 the block and the occurrence. */
295 struct occr *antic_occr;
296 /* List of available occurrence in basic blocks in the function.
297 An "available occurrence" is one that is the last occurrence in the
298 basic block and the operands are not modified by following statements in
299 the basic block [including this insn]. */
300 struct occr *avail_occr;
301 /* Non-null if the computation is PRE redundant.
302 The value is the newly created pseudo-reg to record a copy of the
303 expression in all the places that reach the redundant copy. */
305 /* Maximum distance in instructions this expression can travel.
306 We avoid moving simple expressions for more than a few instructions
307 to keep register pressure under control.
308 A value of "0" removes restrictions on how far the expression can
313 /* Occurrence of an expression.
314 There is one per basic block. If a pattern appears more than once the
315 last appearance is used [or first for anticipatable expressions]. */
319 /* Next occurrence of this expression. */
321 /* The insn that computes the expression. */
323 /* Nonzero if this [anticipatable] occurrence has been deleted. */
325 /* Nonzero if this [available] occurrence has been copied to
327 /* ??? This is mutually exclusive with deleted_p, so they could share
332 typedef struct occr *occr_t;
334 DEF_VEC_ALLOC_P (occr_t, heap);
336 /* Expression and copy propagation hash tables.
337 Each hash table is an array of buckets.
338 ??? It is known that if it were an array of entries, structure elements
339 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
340 not clear whether in the final analysis a sufficient amount of memory would
341 be saved as the size of the available expression bitmaps would be larger
342 [one could build a mapping table without holes afterwards though].
343 Someday I'll perform the computation and figure it out. */
348 This is an array of `expr_hash_table_size' elements. */
351 /* Size of the hash table, in elements. */
354 /* Number of hash table elements. */
355 unsigned int n_elems;
357 /* Whether the table is expression of copy propagation one. */
361 /* Expression hash table. */
362 static struct hash_table_d expr_hash_table;
364 /* Copy propagation hash table. */
365 static struct hash_table_d set_hash_table;
367 /* This is a list of expressions which are MEMs and will be used by load
369 Load motion tracks MEMs which aren't killed by
370 anything except itself. (i.e., loads and stores to a single location).
371 We can then allow movement of these MEM refs with a little special
372 allowance. (all stores copy the same value to the reaching reg used
373 for the loads). This means all values used to store into memory must have
374 no side effects so we can re-issue the setter value.
375 Store Motion uses this structure as an expression table to track stores
376 which look interesting, and might be moveable towards the exit block. */
380 struct expr * expr; /* Gcse expression reference for LM. */
381 rtx pattern; /* Pattern of this mem. */
382 rtx pattern_regs; /* List of registers mentioned by the mem. */
383 rtx loads; /* INSN list of loads seen. */
384 rtx stores; /* INSN list of stores seen. */
385 struct ls_expr * next; /* Next in the list. */
386 int invalid; /* Invalid for some reason. */
387 int index; /* If it maps to a bitmap index. */
388 unsigned int hash_index; /* Index when in a hash table. */
389 rtx reaching_reg; /* Register to use when re-writing. */
392 /* Array of implicit set patterns indexed by basic block index. */
393 static rtx *implicit_sets;
395 /* Head of the list of load/store memory refs. */
396 static struct ls_expr * pre_ldst_mems = NULL;
398 /* Hashtable for the load/store memory refs. */
399 static htab_t pre_ldst_table = NULL;
401 /* Bitmap containing one bit for each register in the program.
402 Used when performing GCSE to track which registers have been set since
403 the start of the basic block. */
404 static regset reg_set_bitmap;
406 /* Array, indexed by basic block number for a list of insns which modify
407 memory within that block. */
408 static rtx * modify_mem_list;
409 static bitmap modify_mem_list_set;
411 /* This array parallels modify_mem_list, but is kept canonicalized. */
412 static rtx * canon_modify_mem_list;
414 /* Bitmap indexed by block numbers to record which blocks contain
416 static bitmap blocks_with_calls;
418 /* Various variables for statistics gathering. */
420 /* Memory used in a pass.
421 This isn't intended to be absolutely precise. Its intent is only
422 to keep an eye on memory usage. */
423 static int bytes_used;
425 /* GCSE substitutions made. */
426 static int gcse_subst_count;
427 /* Number of copy instructions created. */
428 static int gcse_create_count;
429 /* Number of local constants propagated. */
430 static int local_const_prop_count;
431 /* Number of local copies propagated. */
432 static int local_copy_prop_count;
433 /* Number of global constants propagated. */
434 static int global_const_prop_count;
435 /* Number of global copies propagated. */
436 static int global_copy_prop_count;
438 /* Doing code hoisting. */
439 static bool doing_code_hoisting_p = false;
441 /* For available exprs */
442 static sbitmap *ae_kill;
444 static void compute_can_copy (void);
445 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
446 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
447 static void *gcse_alloc (unsigned long);
448 static void alloc_gcse_mem (void);
449 static void free_gcse_mem (void);
450 static void hash_scan_insn (rtx, struct hash_table_d *);
451 static void hash_scan_set (rtx, rtx, struct hash_table_d *);
452 static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
453 static void hash_scan_call (rtx, rtx, struct hash_table_d *);
454 static int want_to_gcse_p (rtx, int *);
455 static bool gcse_constant_p (const_rtx);
456 static int oprs_unchanged_p (const_rtx, const_rtx, int);
457 static int oprs_anticipatable_p (const_rtx, const_rtx);
458 static int oprs_available_p (const_rtx, const_rtx);
459 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
460 struct hash_table_d *);
461 static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
462 static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
463 static unsigned int hash_set (int, int);
464 static int expr_equiv_p (const_rtx, const_rtx);
465 static void record_last_reg_set_info (rtx, int);
466 static void record_last_mem_set_info (rtx);
467 static void record_last_set_info (rtx, const_rtx, void *);
468 static void compute_hash_table (struct hash_table_d *);
469 static void alloc_hash_table (struct hash_table_d *, int);
470 static void free_hash_table (struct hash_table_d *);
471 static void compute_hash_table_work (struct hash_table_d *);
472 static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
473 static struct expr *lookup_set (unsigned int, struct hash_table_d *);
474 static struct expr *next_set (unsigned int, struct expr *);
475 static void reset_opr_set_tables (void);
476 static int oprs_not_set_p (const_rtx, const_rtx);
477 static void mark_call (rtx);
478 static void mark_set (rtx, rtx);
479 static void mark_clobber (rtx, rtx);
480 static void mark_oprs_set (rtx);
481 static void alloc_cprop_mem (int, int);
482 static void free_cprop_mem (void);
483 static void compute_transp (const_rtx, int, sbitmap *, int);
484 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
485 struct hash_table_d *);
486 static void compute_cprop_data (void);
487 static void find_used_regs (rtx *, void *);
488 static int try_replace_reg (rtx, rtx, rtx);
489 static struct expr *find_avail_set (int, rtx);
490 static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
491 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
492 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
493 static void canon_list_insert (rtx, const_rtx, void *);
494 static int cprop_insn (rtx);
495 static void find_implicit_sets (void);
496 static int one_cprop_pass (void);
497 static bool constprop_register (rtx, rtx, rtx);
498 static struct expr *find_bypass_set (int, int);
499 static bool reg_killed_on_edge (const_rtx, const_edge);
500 static int bypass_block (basic_block, rtx, rtx);
501 static int bypass_conditional_jumps (void);
502 static void alloc_pre_mem (int, int);
503 static void free_pre_mem (void);
504 static void compute_pre_data (void);
505 static int pre_expr_reaches_here_p (basic_block, struct expr *,
507 static void insert_insn_end_basic_block (struct expr *, basic_block);
508 static void pre_insert_copy_insn (struct expr *, rtx);
509 static void pre_insert_copies (void);
510 static int pre_delete (void);
511 static int pre_gcse (void);
512 static int one_pre_gcse_pass (void);
513 static void add_label_notes (rtx, rtx);
514 static void alloc_code_hoist_mem (int, int);
515 static void free_code_hoist_mem (void);
516 static void compute_code_hoist_vbeinout (void);
517 static void compute_code_hoist_data (void);
518 static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *,
520 static int hoist_code (void);
521 static int one_code_hoisting_pass (void);
522 static rtx process_insert_insn (struct expr *);
523 static int pre_edge_insert (struct edge_list *, struct expr **);
524 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
525 basic_block, char *);
526 static struct ls_expr * ldst_entry (rtx);
527 static void free_ldst_entry (struct ls_expr *);
528 static void free_ldst_mems (void);
529 static void print_ldst_list (FILE *);
530 static struct ls_expr * find_rtx_in_ldst (rtx);
531 static inline struct ls_expr * first_ls_expr (void);
532 static inline struct ls_expr * next_ls_expr (struct ls_expr *);
533 static int simple_mem (const_rtx);
534 static void invalidate_any_buried_refs (rtx);
535 static void compute_ld_motion_mems (void);
536 static void trim_ld_motion_mems (void);
537 static void update_ld_motion_stores (struct expr *);
538 static void free_insn_expr_list_list (rtx *);
539 static void clear_modify_mem_tables (void);
540 static void free_modify_mem_tables (void);
541 static rtx gcse_emit_move_after (rtx, rtx, rtx);
542 static void local_cprop_find_used_regs (rtx *, void *);
543 static bool do_local_cprop (rtx, rtx);
544 static int local_cprop_pass (void);
545 static bool is_too_expensive (const char *);
547 #define GNEW(T) ((T *) gmalloc (sizeof (T)))
548 #define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))
550 #define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
551 #define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))
553 #define GNEWVAR(T, S) ((T *) gmalloc ((S)))
554 #define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))
556 #define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
557 #define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
559 /* Misc. utilities. */
562 (this_target_gcse->x_can_copy)
563 #define can_copy_init_p \
564 (this_target_gcse->x_can_copy_init_p)
566 /* Compute which modes support reg/reg copy operations. */
569 compute_can_copy (void)
572 #ifndef AVOID_CCMODE_COPIES
575 memset (can_copy, 0, NUM_MACHINE_MODES);
578 for (i = 0; i < NUM_MACHINE_MODES; i++)
579 if (GET_MODE_CLASS (i) == MODE_CC)
581 #ifdef AVOID_CCMODE_COPIES
584 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
585 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
586 if (recog (PATTERN (insn), insn, NULL) >= 0)
596 /* Returns whether the mode supports reg/reg copy operations. */
599 can_copy_p (enum machine_mode mode)
601 if (! can_copy_init_p)
604 can_copy_init_p = true;
607 return can_copy[mode] != 0;
611 /* Cover function to xmalloc to record bytes allocated. */
614 gmalloc (size_t size)
617 return xmalloc (size);
620 /* Cover function to xcalloc to record bytes allocated. */
623 gcalloc (size_t nelem, size_t elsize)
625 bytes_used += nelem * elsize;
626 return xcalloc (nelem, elsize);
629 /* Cover function to obstack_alloc. */
632 gcse_alloc (unsigned long size)
635 return obstack_alloc (&gcse_obstack, size);
638 /* Allocate memory for the reg/memory set tracking tables.
639 This is called at the start of each pass. */
642 alloc_gcse_mem (void)
644 /* Allocate vars to track sets of regs. */
645 reg_set_bitmap = ALLOC_REG_SET (NULL);
647 /* Allocate array to keep a list of insns which modify memory in each
649 modify_mem_list = GCNEWVEC (rtx, last_basic_block);
650 canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
651 modify_mem_list_set = BITMAP_ALLOC (NULL);
652 blocks_with_calls = BITMAP_ALLOC (NULL);
655 /* Free memory allocated by alloc_gcse_mem. */
660 free_modify_mem_tables ();
661 BITMAP_FREE (modify_mem_list_set);
662 BITMAP_FREE (blocks_with_calls);
665 /* Compute the local properties of each recorded expression.
667 Local properties are those that are defined by the block, irrespective of
670 An expression is transparent in a block if its operands are not modified
673 An expression is computed (locally available) in a block if it is computed
674 at least once and expression would contain the same value if the
675 computation was moved to the end of the block.
677 An expression is locally anticipatable in a block if it is computed at
678 least once and expression would contain the same value if the computation
679 was moved to the beginning of the block.
681 We call this routine for cprop, pre and code hoisting. They all compute
682 basically the same information and thus can easily share this code.
684 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
685 properties. If NULL, then it is not necessary to compute or record that
688 TABLE controls which hash table to look at. If it is set hash table,
689 additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
693 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
694 struct hash_table_d *table)
698 /* Initialize any bitmaps that were passed in. */
702 sbitmap_vector_zero (transp, last_basic_block);
704 sbitmap_vector_ones (transp, last_basic_block);
708 sbitmap_vector_zero (comp, last_basic_block);
710 sbitmap_vector_zero (antloc, last_basic_block);
712 for (i = 0; i < table->size; i++)
716 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
718 int indx = expr->bitmap_index;
721 /* The expression is transparent in this block if it is not killed.
722 We start by assuming all are transparent [none are killed], and
723 then reset the bits for those that are. */
725 compute_transp (expr->expr, indx, transp, table->set_p);
727 /* The occurrences recorded in antic_occr are exactly those that
728 we want to set to nonzero in ANTLOC. */
730 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
732 SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);
734 /* While we're scanning the table, this is a good place to
739 /* The occurrences recorded in avail_occr are exactly those that
740 we want to set to nonzero in COMP. */
742 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
744 SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);
746 /* While we're scanning the table, this is a good place to
751 /* While we're scanning the table, this is a good place to
753 expr->reaching_reg = 0;
758 /* Hash table support. */
760 struct reg_avail_info
767 static struct reg_avail_info *reg_avail_info;
768 static basic_block current_bb;
771 /* See whether X, the source of a set, is something we want to consider for
775 want_to_gcse_p (rtx x, int *max_distance_ptr)
778 /* On register stack architectures, don't GCSE constants from the
779 constant pool, as the benefits are often swamped by the overhead
780 of shuffling the register stack between basic blocks. */
781 if (IS_STACK_MODE (GET_MODE (x)))
782 x = avoid_constant_pool_reference (x);
785 /* GCSE'ing constants:
787 We do not specifically distinguish between constant and non-constant
788 expressions in PRE and Hoist. We use rtx_cost below to limit
789 the maximum distance simple expressions can travel.
791 Nevertheless, constants are much easier to GCSE, and, hence,
792 it is easy to overdo the optimizations. Usually, excessive PRE and
793 Hoisting of constant leads to increased register pressure.
795 RA can deal with this by rematerialing some of the constants.
796 Therefore, it is important that the back-end generates sets of constants
797 in a way that allows reload rematerialize them under high register
798 pressure, i.e., a pseudo register with REG_EQUAL to constant
799 is set only once. Failing to do so will result in IRA/reload
800 spilling such constants under high register pressure instead of
801 rematerializing them. */
803 switch (GET_CODE (x))
814 if (!doing_code_hoisting_p)
815 /* Do not PRE constants. */
821 if (doing_code_hoisting_p)
822 /* PRE doesn't implement max_distance restriction. */
827 gcc_assert (!optimize_function_for_speed_p (cfun)
828 && optimize_function_for_size_p (cfun));
829 cost = rtx_cost (x, SET, 0);
831 if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
833 max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
834 if (max_distance == 0)
837 gcc_assert (max_distance > 0);
842 if (max_distance_ptr)
843 *max_distance_ptr = max_distance;
846 return can_assign_to_reg_without_clobbers_p (x);
850 /* Used internally by can_assign_to_reg_without_clobbers_p. */
852 static GTY(()) rtx test_insn;
854 /* Return true if we can assign X to a pseudo register such that the
855 resulting insn does not result in clobbering a hard register as a
858 Additionally, if the target requires it, check that the resulting insn
859 can be copied. If it cannot, this means that X is special and probably
860 has hidden side-effects we don't want to mess with.
862 This function is typically used by code motion passes, to verify
863 that it is safe to insert an insn without worrying about clobbering
864 maybe live hard regs. */
867 can_assign_to_reg_without_clobbers_p (rtx x)
869 int num_clobbers = 0;
872 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
873 if (general_operand (x, GET_MODE (x)))
875 else if (GET_MODE (x) == VOIDmode)
878 /* Otherwise, check if we can make a valid insn from it. First initialize
879 our test insn if we haven't already. */
883 = make_insn_raw (gen_rtx_SET (VOIDmode,
884 gen_rtx_REG (word_mode,
885 FIRST_PSEUDO_REGISTER * 2),
887 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
890 /* Now make an insn like the one we would make when GCSE'ing and see if
892 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
893 SET_SRC (PATTERN (test_insn)) = x;
895 icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
899 if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
902 if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
908 /* Return nonzero if the operands of expression X are unchanged from the
909 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
910 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
913 oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
927 struct reg_avail_info *info = ®_avail_info[REGNO (x)];
929 if (info->last_bb != current_bb)
932 return info->last_set < DF_INSN_LUID (insn);
934 return info->first_set >= DF_INSN_LUID (insn);
938 if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
942 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
969 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
973 /* If we are about to do the last recursive call needed at this
974 level, change it into iteration. This function is called enough
977 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
979 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
982 else if (fmt[i] == 'E')
983 for (j = 0; j < XVECLEN (x, i); j++)
984 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
991 /* Used for communication between mems_conflict_for_gcse_p and
992 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
993 conflict between two memory references. */
994 static int gcse_mems_conflict_p;
996 /* Used for communication between mems_conflict_for_gcse_p and
997 load_killed_in_block_p. A memory reference for a load instruction,
998 mems_conflict_for_gcse_p will see if a memory store conflicts with
1000 static const_rtx gcse_mem_operand;
1002 /* DEST is the output of an instruction. If it is a memory reference, and
1003 possibly conflicts with the load found in gcse_mem_operand, then set
1004 gcse_mems_conflict_p to a nonzero value. */
1007 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
1008 void *data ATTRIBUTE_UNUSED)
1010 while (GET_CODE (dest) == SUBREG
1011 || GET_CODE (dest) == ZERO_EXTRACT
1012 || GET_CODE (dest) == STRICT_LOW_PART)
1013 dest = XEXP (dest, 0);
1015 /* If DEST is not a MEM, then it will not conflict with the load. Note
1016 that function calls are assumed to clobber memory, but are handled
1021 /* If we are setting a MEM in our list of specially recognized MEMs,
1022 don't mark as killed this time. */
1024 if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
1026 if (!find_rtx_in_ldst (dest))
1027 gcse_mems_conflict_p = 1;
1031 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1033 gcse_mems_conflict_p = 1;
1036 /* Return nonzero if the expression in X (a memory reference) is killed
1037 in block BB before or after the insn with the LUID in UID_LIMIT.
1038 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1041 To check the entire block, set UID_LIMIT to max_uid + 1 and
1045 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x, int avail_p)
1047 rtx list_entry = modify_mem_list[bb->index];
1049 /* If this is a readonly then we aren't going to be changing it. */
1050 if (MEM_READONLY_P (x))
1056 /* Ignore entries in the list that do not apply. */
1058 && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
1060 && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
1062 list_entry = XEXP (list_entry, 1);
1066 setter = XEXP (list_entry, 0);
1068 /* If SETTER is a call everything is clobbered. Note that calls
1069 to pure functions are never put on the list, so we need not
1070 worry about them. */
1071 if (CALL_P (setter))
1074 /* SETTER must be an INSN of some kind that sets memory. Call
1075 note_stores to examine each hunk of memory that is modified.
1077 The note_stores interface is pretty limited, so we have to
1078 communicate via global variables. Yuk. */
1079 gcse_mem_operand = x;
1080 gcse_mems_conflict_p = 0;
1081 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1082 if (gcse_mems_conflict_p)
1084 list_entry = XEXP (list_entry, 1);
1089 /* Return nonzero if the operands of expression X are unchanged from
1090 the start of INSN's basic block up to but not including INSN. */
1093 oprs_anticipatable_p (const_rtx x, const_rtx insn)
1095 return oprs_unchanged_p (x, insn, 0);
1098 /* Return nonzero if the operands of expression X are unchanged from
1099 INSN to the end of INSN's basic block. */
1102 oprs_available_p (const_rtx x, const_rtx insn)
1104 return oprs_unchanged_p (x, insn, 1);
1107 /* Hash expression X.
1109 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1110 indicating if a volatile operand is found or if the expression contains
1111 something we don't want to insert in the table. HASH_TABLE_SIZE is
1112 the current size of the hash table to be probed. */
1115 hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
1116 int hash_table_size)
1120 *do_not_record_p = 0;
1122 hash = hash_rtx (x, mode, do_not_record_p,
1123 NULL, /*have_reg_qty=*/false);
1124 return hash % hash_table_size;
1127 /* Hash a set of register REGNO.
1129 Sets are hashed on the register that is set. This simplifies the PRE copy
1132 ??? May need to make things more elaborate. Later, as necessary. */
1135 hash_set (int regno, int hash_table_size)
1140 return hash % hash_table_size;
1143 /* Return nonzero if exp1 is equivalent to exp2. */
1146 expr_equiv_p (const_rtx x, const_rtx y)
1148 return exp_equiv_p (x, y, 0, true);
1151 /* Insert expression X in INSN in the hash TABLE.
1152 If it is already present, record it as the last occurrence in INSN's
1155 MODE is the mode of the value X is being stored into.
1156 It is only used if X is a CONST_INT.
1158 ANTIC_P is nonzero if X is an anticipatable expression.
1159 AVAIL_P is nonzero if X is an available expression.
1161 MAX_DISTANCE is the maximum distance in instructions this expression can
1165 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1166 int avail_p, int max_distance, struct hash_table_d *table)
1168 int found, do_not_record_p;
1170 struct expr *cur_expr, *last_expr = NULL;
1171 struct occr *antic_occr, *avail_occr;
1173 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1175 /* Do not insert expression in table if it contains volatile operands,
1176 or if hash_expr determines the expression is something we don't want
1177 to or can't handle. */
1178 if (do_not_record_p)
1181 cur_expr = table->table[hash];
1184 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1186 /* If the expression isn't found, save a pointer to the end of
1188 last_expr = cur_expr;
1189 cur_expr = cur_expr->next_same_hash;
1194 cur_expr = GOBNEW (struct expr);
1195 bytes_used += sizeof (struct expr);
1196 if (table->table[hash] == NULL)
1197 /* This is the first pattern that hashed to this index. */
1198 table->table[hash] = cur_expr;
1200 /* Add EXPR to end of this hash chain. */
1201 last_expr->next_same_hash = cur_expr;
1203 /* Set the fields of the expr element. */
1205 cur_expr->bitmap_index = table->n_elems++;
1206 cur_expr->next_same_hash = NULL;
1207 cur_expr->antic_occr = NULL;
1208 cur_expr->avail_occr = NULL;
1209 gcc_assert (max_distance >= 0);
1210 cur_expr->max_distance = max_distance;
1213 gcc_assert (cur_expr->max_distance == max_distance);
1215 /* Now record the occurrence(s). */
1218 antic_occr = cur_expr->antic_occr;
1221 && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
1225 /* Found another instance of the expression in the same basic block.
1226 Prefer the currently recorded one. We want the first one in the
1227 block and the block is scanned from start to end. */
1228 ; /* nothing to do */
1231 /* First occurrence of this expression in this basic block. */
1232 antic_occr = GOBNEW (struct occr);
1233 bytes_used += sizeof (struct occr);
1234 antic_occr->insn = insn;
1235 antic_occr->next = cur_expr->antic_occr;
1236 antic_occr->deleted_p = 0;
1237 cur_expr->antic_occr = antic_occr;
1243 avail_occr = cur_expr->avail_occr;
1246 && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
1248 /* Found another instance of the expression in the same basic block.
1249 Prefer this occurrence to the currently recorded one. We want
1250 the last one in the block and the block is scanned from start
1252 avail_occr->insn = insn;
1256 /* First occurrence of this expression in this basic block. */
1257 avail_occr = GOBNEW (struct occr);
1258 bytes_used += sizeof (struct occr);
1259 avail_occr->insn = insn;
1260 avail_occr->next = cur_expr->avail_occr;
1261 avail_occr->deleted_p = 0;
1262 cur_expr->avail_occr = avail_occr;
1267 /* Insert pattern X in INSN in the hash table.
1268 X is a SET of a reg to either another reg or a constant.
1269 If it is already present, record it as the last occurrence in INSN's
1273 insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
1277 struct expr *cur_expr, *last_expr = NULL;
1278 struct occr *cur_occr;
1280 gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));
1282 hash = hash_set (REGNO (SET_DEST (x)), table->size);
1284 cur_expr = table->table[hash];
1287 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1289 /* If the expression isn't found, save a pointer to the end of
1291 last_expr = cur_expr;
1292 cur_expr = cur_expr->next_same_hash;
1297 cur_expr = GOBNEW (struct expr);
1298 bytes_used += sizeof (struct expr);
1299 if (table->table[hash] == NULL)
1300 /* This is the first pattern that hashed to this index. */
1301 table->table[hash] = cur_expr;
1303 /* Add EXPR to end of this hash chain. */
1304 last_expr->next_same_hash = cur_expr;
1306 /* Set the fields of the expr element.
1307 We must copy X because it can be modified when copy propagation is
1308 performed on its operands. */
1309 cur_expr->expr = copy_rtx (x);
1310 cur_expr->bitmap_index = table->n_elems++;
1311 cur_expr->next_same_hash = NULL;
1312 cur_expr->antic_occr = NULL;
1313 cur_expr->avail_occr = NULL;
1314 /* Not used for set_p tables. */
1315 cur_expr->max_distance = 0;
1318 /* Now record the occurrence. */
1319 cur_occr = cur_expr->avail_occr;
1322 && BLOCK_FOR_INSN (cur_occr->insn) == BLOCK_FOR_INSN (insn))
1324 /* Found another instance of the expression in the same basic block.
1325 Prefer this occurrence to the currently recorded one. We want
1326 the last one in the block and the block is scanned from start
1328 cur_occr->insn = insn;
1332 /* First occurrence of this expression in this basic block. */
1333 cur_occr = GOBNEW (struct occr);
1334 bytes_used += sizeof (struct occr);
1335 cur_occr->insn = insn;
1336 cur_occr->next = cur_expr->avail_occr;
1337 cur_occr->deleted_p = 0;
1338 cur_expr->avail_occr = cur_occr;
1342 /* Determine whether the rtx X should be treated as a constant for
1343 the purposes of GCSE's constant propagation. */
1346 gcse_constant_p (const_rtx x)
1348 /* Consider a COMPARE of two integers constant. */
1349 if (GET_CODE (x) == COMPARE
1350 && CONST_INT_P (XEXP (x, 0))
1351 && CONST_INT_P (XEXP (x, 1)))
1354 /* Consider a COMPARE of the same registers is a constant
1355 if they are not floating point registers. */
1356 if (GET_CODE(x) == COMPARE
1357 && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
1358 && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
1359 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
1360 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
1363 /* Since X might be inserted more than once we have to take care that it
1365 return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
1368 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
1372 hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
1374 rtx src = SET_SRC (pat);
1375 rtx dest = SET_DEST (pat);
1378 if (GET_CODE (src) == CALL)
1379 hash_scan_call (src, insn, table);
1381 else if (REG_P (dest))
1383 unsigned int regno = REGNO (dest);
1385 int max_distance = 0;
1387 /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1389 This allows us to do a single GCSE pass and still eliminate
1390 redundant constants, addresses or other expressions that are
1391 constructed with multiple instructions.
1393 However, keep the original SRC if INSN is a simple reg-reg move. In
1394 In this case, there will almost always be a REG_EQUAL note on the
1395 insn that sets SRC. By recording the REG_EQUAL value here as SRC
1396 for INSN, we miss copy propagation opportunities and we perform the
1397 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1398 do more than one PRE GCSE pass.
1400 Note that this does not impede profitable constant propagations. We
1401 "look through" reg-reg sets in lookup_avail_set. */
1402 note = find_reg_equal_equiv_note (insn);
1404 && REG_NOTE_KIND (note) == REG_EQUAL
1407 ? gcse_constant_p (XEXP (note, 0))
1408 : want_to_gcse_p (XEXP (note, 0), NULL)))
1409 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
1411 /* Only record sets of pseudo-regs in the hash table. */
1413 && regno >= FIRST_PSEUDO_REGISTER
1414 /* Don't GCSE something if we can't do a reg/reg copy. */
1415 && can_copy_p (GET_MODE (dest))
1416 /* GCSE commonly inserts instruction after the insn. We can't
1417 do that easily for EH edges so disable GCSE on these for now. */
1418 /* ??? We can now easily create new EH landing pads at the
1419 gimple level, for splitting edges; there's no reason we
1420 can't do the same thing at the rtl level. */
1421 && !can_throw_internal (insn)
1422 /* Is SET_SRC something we want to gcse? */
1423 && want_to_gcse_p (src, &max_distance)
1424 /* Don't CSE a nop. */
1425 && ! set_noop_p (pat)
1426 /* Don't GCSE if it has attached REG_EQUIV note.
1427 At this point this only function parameters should have
1428 REG_EQUIV notes and if the argument slot is used somewhere
1429 explicitly, it means address of parameter has been taken,
1430 so we should not extend the lifetime of the pseudo. */
1431 && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1433 /* An expression is not anticipatable if its operands are
1434 modified before this insn or if this is not the only SET in
1435 this insn. The latter condition does not have to mean that
1436 SRC itself is not anticipatable, but we just will not be
1437 able to handle code motion of insns with multiple sets. */
1438 int antic_p = oprs_anticipatable_p (src, insn)
1439 && !multiple_sets (insn);
1440 /* An expression is not available if its operands are
1441 subsequently modified, including this insn. It's also not
1442 available if this is a branch, because we can't insert
1443 a set after the branch. */
1444 int avail_p = (oprs_available_p (src, insn)
1445 && ! JUMP_P (insn));
1447 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
1448 max_distance, table);
1451 /* Record sets for constant/copy propagation. */
1452 else if (table->set_p
1453 && regno >= FIRST_PSEUDO_REGISTER
1455 && REGNO (src) >= FIRST_PSEUDO_REGISTER
1456 && can_copy_p (GET_MODE (dest))
1457 && REGNO (src) != regno)
1458 || gcse_constant_p (src))
1459 /* A copy is not available if its src or dest is subsequently
1460 modified. Here we want to search from INSN+1 on, but
1461 oprs_available_p searches from INSN on. */
1462 && (insn == BB_END (BLOCK_FOR_INSN (insn))
1463 || (tmp = next_nonnote_insn (insn)) == NULL_RTX
1464 || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
1465 || oprs_available_p (pat, tmp)))
1466 insert_set_in_table (pat, insn, table);
1468 /* In case of store we want to consider the memory value as available in
1469 the REG stored in that memory. This makes it possible to remove
1470 redundant loads from due to stores to the same location. */
1471 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1473 unsigned int regno = REGNO (src);
1474 int max_distance = 0;
1476 /* Do not do this for constant/copy propagation. */
1478 /* Only record sets of pseudo-regs in the hash table. */
1479 && regno >= FIRST_PSEUDO_REGISTER
1480 /* Don't GCSE something if we can't do a reg/reg copy. */
1481 && can_copy_p (GET_MODE (src))
1482 /* GCSE commonly inserts instruction after the insn. We can't
1483 do that easily for EH edges so disable GCSE on these for now. */
1484 && !can_throw_internal (insn)
1485 /* Is SET_DEST something we want to gcse? */
1486 && want_to_gcse_p (dest, &max_distance)
1487 /* Don't CSE a nop. */
1488 && ! set_noop_p (pat)
1489 /* Don't GCSE if it has attached REG_EQUIV note.
1490 At this point this only function parameters should have
1491 REG_EQUIV notes and if the argument slot is used somewhere
1492 explicitly, it means address of parameter has been taken,
1493 so we should not extend the lifetime of the pseudo. */
1494 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1495 || ! MEM_P (XEXP (note, 0))))
1497 /* Stores are never anticipatable. */
1499 /* An expression is not available if its operands are
1500 subsequently modified, including this insn. It's also not
1501 available if this is a branch, because we can't insert
1502 a set after the branch. */
1503 int avail_p = oprs_available_p (dest, insn)
1506 /* Record the memory expression (DEST) in the hash table. */
1507 insert_expr_in_table (dest, GET_MODE (dest), insn,
1508 antic_p, avail_p, max_distance, table);
1514 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1515 struct hash_table_d *table ATTRIBUTE_UNUSED)
1517 /* Currently nothing to do. */
1521 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1522 struct hash_table_d *table ATTRIBUTE_UNUSED)
1524 /* Currently nothing to do. */
1527 /* Process INSN and add hash table entries as appropriate.
1529 Only available expressions that set a single pseudo-reg are recorded.
1531 Single sets in a PARALLEL could be handled, but it's an extra complication
1532 that isn't dealt with right now. The trick is handling the CLOBBERs that
1533 are also in the PARALLEL. Later.
1535 If SET_P is nonzero, this is for the assignment hash table,
1536 otherwise it is for the expression hash table. */
1539 hash_scan_insn (rtx insn, struct hash_table_d *table)
1541 rtx pat = PATTERN (insn);
1544 /* Pick out the sets of INSN and for other forms of instructions record
1545 what's been modified. */
1547 if (GET_CODE (pat) == SET)
1548 hash_scan_set (pat, insn, table);
1549 else if (GET_CODE (pat) == PARALLEL)
1550 for (i = 0; i < XVECLEN (pat, 0); i++)
1552 rtx x = XVECEXP (pat, 0, i);
1554 if (GET_CODE (x) == SET)
1555 hash_scan_set (x, insn, table);
1556 else if (GET_CODE (x) == CLOBBER)
1557 hash_scan_clobber (x, insn, table);
1558 else if (GET_CODE (x) == CALL)
1559 hash_scan_call (x, insn, table);
1562 else if (GET_CODE (pat) == CLOBBER)
1563 hash_scan_clobber (pat, insn, table);
1564 else if (GET_CODE (pat) == CALL)
1565 hash_scan_call (pat, insn, table);
1569 dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
1572 /* Flattened out table, so it's printed in proper order. */
1573 struct expr **flat_table;
1574 unsigned int *hash_val;
1577 flat_table = XCNEWVEC (struct expr *, table->n_elems);
1578 hash_val = XNEWVEC (unsigned int, table->n_elems);
1580 for (i = 0; i < (int) table->size; i++)
1581 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1583 flat_table[expr->bitmap_index] = expr;
1584 hash_val[expr->bitmap_index] = i;
1587 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1588 name, table->size, table->n_elems);
1590 for (i = 0; i < (int) table->n_elems; i++)
1591 if (flat_table[i] != 0)
1593 expr = flat_table[i];
1594 fprintf (file, "Index %d (hash value %d; max distance %d)\n ",
1595 expr->bitmap_index, hash_val[i], expr->max_distance);
1596 print_rtl (file, expr->expr);
1597 fprintf (file, "\n");
1600 fprintf (file, "\n");
1606 /* Record register first/last/block set information for REGNO in INSN.
1608 first_set records the first place in the block where the register
1609 is set and is used to compute "anticipatability".
1611 last_set records the last place in the block where the register
1612 is set and is used to compute "availability".
1614 last_bb records the block for which first_set and last_set are
1615 valid, as a quick test to invalidate them. */
1618 record_last_reg_set_info (rtx insn, int regno)
1620 struct reg_avail_info *info = ®_avail_info[regno];
1621 int luid = DF_INSN_LUID (insn);
1623 info->last_set = luid;
1624 if (info->last_bb != current_bb)
1626 info->last_bb = current_bb;
1627 info->first_set = luid;
1632 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1633 Note we store a pair of elements in the list, so they have to be
1634 taken off pairwise. */
1637 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
1640 rtx dest_addr, insn;
1643 while (GET_CODE (dest) == SUBREG
1644 || GET_CODE (dest) == ZERO_EXTRACT
1645 || GET_CODE (dest) == STRICT_LOW_PART)
1646 dest = XEXP (dest, 0);
1648 /* If DEST is not a MEM, then it will not conflict with a load. Note
1649 that function calls are assumed to clobber memory, but are handled
1655 dest_addr = get_addr (XEXP (dest, 0));
1656 dest_addr = canon_rtx (dest_addr);
1657 insn = (rtx) v_insn;
1658 bb = BLOCK_FOR_INSN (insn)->index;
1660 canon_modify_mem_list[bb] =
1661 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
1662 canon_modify_mem_list[bb] =
1663 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
1666 /* Record memory modification information for INSN. We do not actually care
1667 about the memory location(s) that are set, or even how they are set (consider
1668 a CALL_INSN). We merely need to record which insns modify memory. */
1671 record_last_mem_set_info (rtx insn)
1673 int bb = BLOCK_FOR_INSN (insn)->index;
1675 /* load_killed_in_block_p will handle the case of calls clobbering
1677 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
1678 bitmap_set_bit (modify_mem_list_set, bb);
1682 /* Note that traversals of this loop (other than for free-ing)
1683 will break after encountering a CALL_INSN. So, there's no
1684 need to insert a pair of items, as canon_list_insert does. */
1685 canon_modify_mem_list[bb] =
1686 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
1687 bitmap_set_bit (blocks_with_calls, bb);
1690 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
1693 /* Called from compute_hash_table via note_stores to handle one
1694 SET or CLOBBER in an insn. DATA is really the instruction in which
1695 the SET is taking place. */
1698 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1700 rtx last_set_insn = (rtx) data;
1702 if (GET_CODE (dest) == SUBREG)
1703 dest = SUBREG_REG (dest);
1706 record_last_reg_set_info (last_set_insn, REGNO (dest));
1707 else if (MEM_P (dest)
1708 /* Ignore pushes, they clobber nothing. */
1709 && ! push_operand (dest, GET_MODE (dest)))
1710 record_last_mem_set_info (last_set_insn);
1713 /* Top level function to create an expression or assignment hash table.
1715 Expression entries are placed in the hash table if
1716 - they are of the form (set (pseudo-reg) src),
1717 - src is something we want to perform GCSE on,
1718 - none of the operands are subsequently modified in the block
1720 Assignment entries are placed in the hash table if
1721 - they are of the form (set (pseudo-reg) src),
1722 - src is something we want to perform const/copy propagation on,
1723 - none of the operands or target are subsequently modified in the block
1725 Currently src must be a pseudo-reg or a const_int.
1727 TABLE is the table computed. */
1730 compute_hash_table_work (struct hash_table_d *table)
1734 /* re-Cache any INSN_LIST nodes we have allocated. */
1735 clear_modify_mem_tables ();
1736 /* Some working arrays used to track first and last set in each block. */
1737 reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1739 for (i = 0; i < max_reg_num (); ++i)
1740 reg_avail_info[i].last_bb = NULL;
1742 FOR_EACH_BB (current_bb)
1747 /* First pass over the instructions records information used to
1748 determine when registers and memory are first and last set. */
1749 FOR_BB_INSNS (current_bb, insn)
1751 if (! INSN_P (insn))
1756 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1757 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1758 record_last_reg_set_info (insn, regno);
1763 note_stores (PATTERN (insn), record_last_set_info, insn);
1766 /* Insert implicit sets in the hash table. */
1768 && implicit_sets[current_bb->index] != NULL_RTX)
1769 hash_scan_set (implicit_sets[current_bb->index],
1770 BB_HEAD (current_bb), table);
1772 /* The next pass builds the hash table. */
1773 FOR_BB_INSNS (current_bb, insn)
1775 hash_scan_insn (insn, table);
1778 free (reg_avail_info);
1779 reg_avail_info = NULL;
1782 /* Allocate space for the set/expr hash TABLE.
1783 It is used to determine the number of buckets to use.
1784 SET_P determines whether set or expression table will
1788 alloc_hash_table (struct hash_table_d *table, int set_p)
1792 n = get_max_insn_count ();
1794 table->size = n / 4;
1795 if (table->size < 11)
1798 /* Attempt to maintain efficient use of hash table.
1799 Making it an odd number is simplest for now.
1800 ??? Later take some measurements. */
1802 n = table->size * sizeof (struct expr *);
1803 table->table = GNEWVAR (struct expr *, n);
1804 table->set_p = set_p;
1807 /* Free things allocated by alloc_hash_table. */
1810 free_hash_table (struct hash_table_d *table)
1812 free (table->table);
1815 /* Compute the hash TABLE for doing copy/const propagation or
1816 expression hash table. */
1819 compute_hash_table (struct hash_table_d *table)
1821 /* Initialize count of number of entries in hash table. */
1823 memset (table->table, 0, table->size * sizeof (struct expr *));
1825 compute_hash_table_work (table);
1828 /* Expression tracking support. */
1830 /* Lookup REGNO in the set TABLE. The result is a pointer to the
1831 table entry, or NULL if not found. */
1833 static struct expr *
1834 lookup_set (unsigned int regno, struct hash_table_d *table)
1836 unsigned int hash = hash_set (regno, table->size);
1839 expr = table->table[hash];
1841 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
1842 expr = expr->next_same_hash;
1847 /* Return the next entry for REGNO in list EXPR. */
1849 static struct expr *
1850 next_set (unsigned int regno, struct expr *expr)
1853 expr = expr->next_same_hash;
1854 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
1859 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
1860 types may be mixed. */
1863 free_insn_expr_list_list (rtx *listp)
1867 for (list = *listp; list ; list = next)
1869 next = XEXP (list, 1);
1870 if (GET_CODE (list) == EXPR_LIST)
1871 free_EXPR_LIST_node (list);
1873 free_INSN_LIST_node (list);
1879 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1881 clear_modify_mem_tables (void)
1886 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1888 free_INSN_LIST_list (modify_mem_list + i);
1889 free_insn_expr_list_list (canon_modify_mem_list + i);
1891 bitmap_clear (modify_mem_list_set);
1892 bitmap_clear (blocks_with_calls);
1895 /* Release memory used by modify_mem_list_set. */
1898 free_modify_mem_tables (void)
1900 clear_modify_mem_tables ();
1901 free (modify_mem_list);
1902 free (canon_modify_mem_list);
1903 modify_mem_list = 0;
1904 canon_modify_mem_list = 0;
1907 /* Reset tables used to keep track of what's still available [since the
1908 start of the block]. */
1911 reset_opr_set_tables (void)
1913 /* Maintain a bitmap of which regs have been set since beginning of
1915 CLEAR_REG_SET (reg_set_bitmap);
1917 /* Also keep a record of the last instruction to modify memory.
1918 For now this is very trivial, we only record whether any memory
1919 location has been modified. */
1920 clear_modify_mem_tables ();
1923 /* Return nonzero if the operands of X are not set before INSN in
1924 INSN's basic block. */
1927 oprs_not_set_p (const_rtx x, const_rtx insn)
1936 code = GET_CODE (x);
1953 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
1954 DF_INSN_LUID (insn), x, 0))
1957 return oprs_not_set_p (XEXP (x, 0), insn);
1960 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
1966 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1970 /* If we are about to do the last recursive call
1971 needed at this level, change it into iteration.
1972 This function is called enough to be worth it. */
1974 return oprs_not_set_p (XEXP (x, i), insn);
1976 if (! oprs_not_set_p (XEXP (x, i), insn))
1979 else if (fmt[i] == 'E')
1980 for (j = 0; j < XVECLEN (x, i); j++)
1981 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
1988 /* Mark things set by a CALL. */
1991 mark_call (rtx insn)
1993 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1994 record_last_mem_set_info (insn);
1997 /* Mark things set by a SET. */
2000 mark_set (rtx pat, rtx insn)
2002 rtx dest = SET_DEST (pat);
2004 while (GET_CODE (dest) == SUBREG
2005 || GET_CODE (dest) == ZERO_EXTRACT
2006 || GET_CODE (dest) == STRICT_LOW_PART)
2007 dest = XEXP (dest, 0);
2010 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2011 else if (MEM_P (dest))
2012 record_last_mem_set_info (insn);
2014 if (GET_CODE (SET_SRC (pat)) == CALL)
2018 /* Record things set by a CLOBBER. */
2021 mark_clobber (rtx pat, rtx insn)
2023 rtx clob = XEXP (pat, 0);
2025 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2026 clob = XEXP (clob, 0);
2029 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2031 record_last_mem_set_info (insn);
2034 /* Record things set by INSN.
2035 This data is used by oprs_not_set_p. */
2038 mark_oprs_set (rtx insn)
2040 rtx pat = PATTERN (insn);
2043 if (GET_CODE (pat) == SET)
2044 mark_set (pat, insn);
2045 else if (GET_CODE (pat) == PARALLEL)
2046 for (i = 0; i < XVECLEN (pat, 0); i++)
2048 rtx x = XVECEXP (pat, 0, i);
2050 if (GET_CODE (x) == SET)
2052 else if (GET_CODE (x) == CLOBBER)
2053 mark_clobber (x, insn);
2054 else if (GET_CODE (x) == CALL)
2058 else if (GET_CODE (pat) == CLOBBER)
2059 mark_clobber (pat, insn);
2060 else if (GET_CODE (pat) == CALL)
2065 /* Compute copy/constant propagation working variables. */
2067 /* Local properties of assignments. */
2068 static sbitmap *cprop_pavloc;
2069 static sbitmap *cprop_absaltered;
2071 /* Global properties of assignments (computed from the local properties). */
2072 static sbitmap *cprop_avin;
2073 static sbitmap *cprop_avout;
2075 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2076 basic blocks. N_SETS is the number of sets. */
2079 alloc_cprop_mem (int n_blocks, int n_sets)
2081 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2082 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2084 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2085 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2088 /* Free vars used by copy/const propagation. */
2091 free_cprop_mem (void)
2093 sbitmap_vector_free (cprop_pavloc);
2094 sbitmap_vector_free (cprop_absaltered);
2095 sbitmap_vector_free (cprop_avin);
2096 sbitmap_vector_free (cprop_avout);
2099 /* For each block, compute whether X is transparent. X is either an
2100 expression or an assignment [though we don't care which, for this context
2101 an assignment is treated as an expression]. For each block where an
2102 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2106 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2112 /* repeat is used to turn tail-recursion into iteration since GCC
2113 can't do it when there's no return value. */
2119 code = GET_CODE (x);
2126 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2128 def = DF_REF_NEXT_REG (def))
2129 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2134 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2136 def = DF_REF_NEXT_REG (def))
2137 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2143 if (! MEM_READONLY_P (x))
2148 /* First handle all the blocks with calls. We don't need to
2149 do any list walking for them. */
2150 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2153 SET_BIT (bmap[bb_index], indx);
2155 RESET_BIT (bmap[bb_index], indx);
2158 /* Now iterate over the blocks which have memory modifications
2159 but which do not have any calls. */
2160 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2164 rtx list_entry = canon_modify_mem_list[bb_index];
2168 rtx dest, dest_addr;
2170 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2171 Examine each hunk of memory that is modified. */
2173 dest = XEXP (list_entry, 0);
2174 list_entry = XEXP (list_entry, 1);
2175 dest_addr = XEXP (list_entry, 0);
2177 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2178 x, NULL_RTX, rtx_addr_varies_p))
2181 SET_BIT (bmap[bb_index], indx);
2183 RESET_BIT (bmap[bb_index], indx);
2186 list_entry = XEXP (list_entry, 1);
2211 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2215 /* If we are about to do the last recursive call
2216 needed at this level, change it into iteration.
2217 This function is called enough to be worth it. */
2224 compute_transp (XEXP (x, i), indx, bmap, set_p);
2226 else if (fmt[i] == 'E')
2227 for (j = 0; j < XVECLEN (x, i); j++)
2228 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2232 /* Top level routine to do the dataflow analysis needed by copy/const
2236 compute_cprop_data (void)
2238 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2239 compute_available (cprop_pavloc, cprop_absaltered,
2240 cprop_avout, cprop_avin);
2243 /* Copy/constant propagation. */
2245 /* Maximum number of register uses in an insn that we handle. */
2248 /* Table of uses found in an insn.
2249 Allocated statically to avoid alloc/free complexity and overhead. */
2250 static struct reg_use reg_use_table[MAX_USES];
2252 /* Index into `reg_use_table' while building it. */
2253 static int reg_use_count;
2255 /* Set up a list of register numbers used in INSN. The found uses are stored
2256 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2257 and contains the number of uses in the table upon exit.
2259 ??? If a register appears multiple times we will record it multiple times.
2260 This doesn't hurt anything but it will slow things down. */
2263 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2270 /* repeat is used to turn tail-recursion into iteration since GCC
2271 can't do it when there's no return value. */
2276 code = GET_CODE (x);
2279 if (reg_use_count == MAX_USES)
2282 reg_use_table[reg_use_count].reg_rtx = x;
2286 /* Recursively scan the operands of this expression. */
2288 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2292 /* If we are about to do the last recursive call
2293 needed at this level, change it into iteration.
2294 This function is called enough to be worth it. */
2301 find_used_regs (&XEXP (x, i), data);
2303 else if (fmt[i] == 'E')
2304 for (j = 0; j < XVECLEN (x, i); j++)
2305 find_used_regs (&XVECEXP (x, i, j), data);
2309 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2310 Returns nonzero is successful. */
2313 try_replace_reg (rtx from, rtx to, rtx insn)
2315 rtx note = find_reg_equal_equiv_note (insn);
2318 rtx set = single_set (insn);
2320 /* Usually we substitute easy stuff, so we won't copy everything.
2321 We however need to take care to not duplicate non-trivial CONST
2325 validate_replace_src_group (from, to, insn);
2326 if (num_changes_pending () && apply_change_group ())
2329 /* Try to simplify SET_SRC if we have substituted a constant. */
2330 if (success && set && CONSTANT_P (to))
2332 src = simplify_rtx (SET_SRC (set));
2335 validate_change (insn, &SET_SRC (set), src, 0);
2338 /* If there is already a REG_EQUAL note, update the expression in it
2339 with our replacement. */
2340 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2341 set_unique_reg_note (insn, REG_EQUAL,
2342 simplify_replace_rtx (XEXP (note, 0), from, to));
2343 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2345 /* If above failed and this is a single set, try to simplify the source of
2346 the set given our substitution. We could perhaps try this for multiple
2347 SETs, but it probably won't buy us anything. */
2348 src = simplify_replace_rtx (SET_SRC (set), from, to);
2350 if (!rtx_equal_p (src, SET_SRC (set))
2351 && validate_change (insn, &SET_SRC (set), src, 0))
2354 /* If we've failed to do replacement, have a single SET, don't already
2355 have a note, and have no special SET, add a REG_EQUAL note to not
2356 lose information. */
2357 if (!success && note == 0 && set != 0
2358 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2359 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2360 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2363 /* REG_EQUAL may get simplified into register.
2364 We don't allow that. Remove that note. This code ought
2365 not to happen, because previous code ought to synthesize
2366 reg-reg move, but be on the safe side. */
2367 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2368 remove_note (insn, note);
2373 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
2374 NULL no such set is found. */
2376 static struct expr *
2377 find_avail_set (int regno, rtx insn)
2379 /* SET1 contains the last set found that can be returned to the caller for
2380 use in a substitution. */
2381 struct expr *set1 = 0;
2383 /* Loops are not possible here. To get a loop we would need two sets
2384 available at the start of the block containing INSN. i.e. we would
2385 need two sets like this available at the start of the block:
2387 (set (reg X) (reg Y))
2388 (set (reg Y) (reg X))
2390 This can not happen since the set of (reg Y) would have killed the
2391 set of (reg X) making it unavailable at the start of this block. */
2395 struct expr *set = lookup_set (regno, &set_hash_table);
2397 /* Find a set that is available at the start of the block
2398 which contains INSN. */
2401 if (TEST_BIT (cprop_avin[BLOCK_FOR_INSN (insn)->index],
2404 set = next_set (regno, set);
2407 /* If no available set was found we've reached the end of the
2408 (possibly empty) copy chain. */
2412 gcc_assert (GET_CODE (set->expr) == SET);
2414 src = SET_SRC (set->expr);
2416 /* We know the set is available.
2417 Now check that SRC is ANTLOC (i.e. none of the source operands
2418 have changed since the start of the block).
2420 If the source operand changed, we may still use it for the next
2421 iteration of this loop, but we may not use it for substitutions. */
2423 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2426 /* If the source of the set is anything except a register, then
2427 we have reached the end of the copy chain. */
2431 /* Follow the copy chain, i.e. start another iteration of the loop
2432 and see if we have an available copy into SRC. */
2433 regno = REGNO (src);
2436 /* SET1 holds the last set that was available and anticipatable at
2441 /* Subroutine of cprop_insn that tries to propagate constants into
2442 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2443 it is the instruction that immediately precedes JUMP, and must be a
2444 single SET of a register. FROM is what we will try to replace,
2445 SRC is the constant we will try to substitute for it. Returns nonzero
2446 if a change was made. */
2449 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2451 rtx new_rtx, set_src, note_src;
2452 rtx set = pc_set (jump);
2453 rtx note = find_reg_equal_equiv_note (jump);
2457 note_src = XEXP (note, 0);
2458 if (GET_CODE (note_src) == EXPR_LIST)
2459 note_src = NULL_RTX;
2461 else note_src = NULL_RTX;
2463 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2464 set_src = note_src ? note_src : SET_SRC (set);
2466 /* First substitute the SETCC condition into the JUMP instruction,
2467 then substitute that given values into this expanded JUMP. */
2468 if (setcc != NULL_RTX
2469 && !modified_between_p (from, setcc, jump)
2470 && !modified_between_p (src, setcc, jump))
2473 rtx setcc_set = single_set (setcc);
2474 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2475 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2476 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2477 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2483 new_rtx = simplify_replace_rtx (set_src, from, src);
2485 /* If no simplification can be made, then try the next register. */
2486 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2489 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
2490 if (new_rtx == pc_rtx)
2494 /* Ensure the value computed inside the jump insn to be equivalent
2495 to one computed by setcc. */
2496 if (setcc && modified_in_p (new_rtx, setcc))
2498 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2500 /* When (some) constants are not valid in a comparison, and there
2501 are two registers to be replaced by constants before the entire
2502 comparison can be folded into a constant, we need to keep
2503 intermediate information in REG_EQUAL notes. For targets with
2504 separate compare insns, such notes are added by try_replace_reg.
2505 When we have a combined compare-and-branch instruction, however,
2506 we need to attach a note to the branch itself to make this
2507 optimization work. */
2509 if (!rtx_equal_p (new_rtx, note_src))
2510 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2514 /* Remove REG_EQUAL note after simplification. */
2516 remove_note (jump, note);
2520 /* Delete the cc0 setter. */
2521 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2522 delete_insn (setcc);
2525 global_const_prop_count++;
2526 if (dump_file != NULL)
2529 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2530 REGNO (from), INSN_UID (jump));
2531 print_rtl (dump_file, src);
2532 fprintf (dump_file, "\n");
2534 purge_dead_edges (bb);
2536 /* If a conditional jump has been changed into unconditional jump, remove
2537 the jump and make the edge fallthru - this is always called in
2539 if (new_rtx != pc_rtx && simplejump_p (jump))
2544 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2545 if (e->dest != EXIT_BLOCK_PTR
2546 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2548 e->flags |= EDGE_FALLTHRU;
2558 constprop_register (rtx insn, rtx from, rtx to)
2562 /* Check for reg or cc0 setting instructions followed by
2563 conditional branch instructions first. */
2564 if ((sset = single_set (insn)) != NULL
2566 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2568 rtx dest = SET_DEST (sset);
2569 if ((REG_P (dest) || CC0_P (dest))
2570 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2574 /* Handle normal insns next. */
2575 if (NONJUMP_INSN_P (insn)
2576 && try_replace_reg (from, to, insn))
2579 /* Try to propagate a CONST_INT into a conditional jump.
2580 We're pretty specific about what we will handle in this
2581 code, we can extend this as necessary over time.
2583 Right now the insn in question must look like
2584 (set (pc) (if_then_else ...)) */
2585 else if (any_condjump_p (insn) && onlyjump_p (insn))
2586 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2590 /* Perform constant and copy propagation on INSN.
2591 The result is nonzero if a change was made. */
2594 cprop_insn (rtx insn)
2596 struct reg_use *reg_used;
2604 note_uses (&PATTERN (insn), find_used_regs, NULL);
2606 note = find_reg_equal_equiv_note (insn);
2608 /* We may win even when propagating constants into notes. */
2610 find_used_regs (&XEXP (note, 0), NULL);
2612 for (reg_used = ®_use_table[0]; reg_use_count > 0;
2613 reg_used++, reg_use_count--)
2615 unsigned int regno = REGNO (reg_used->reg_rtx);
2619 /* If the register has already been set in this block, there's
2620 nothing we can do. */
2621 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2624 /* Find an assignment that sets reg_used and is available
2625 at the start of the block. */
2626 set = find_avail_set (regno, insn);
2631 /* ??? We might be able to handle PARALLELs. Later. */
2632 gcc_assert (GET_CODE (pat) == SET);
2634 src = SET_SRC (pat);
2636 /* Constant propagation. */
2637 if (gcse_constant_p (src))
2639 if (constprop_register (insn, reg_used->reg_rtx, src))
2642 global_const_prop_count++;
2643 if (dump_file != NULL)
2645 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2646 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2647 print_rtl (dump_file, src);
2648 fprintf (dump_file, "\n");
2650 if (INSN_DELETED_P (insn))
2654 else if (REG_P (src)
2655 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2656 && REGNO (src) != regno)
2658 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2661 global_copy_prop_count++;
2662 if (dump_file != NULL)
2664 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2665 regno, INSN_UID (insn));
2666 fprintf (dump_file, " with reg %d\n", REGNO (src));
2669 /* The original insn setting reg_used may or may not now be
2670 deletable. We leave the deletion to flow. */
2671 /* FIXME: If it turns out that the insn isn't deletable,
2672 then we may have unnecessarily extended register lifetimes
2673 and made things worse. */
2678 if (changed && DEBUG_INSN_P (insn))
2684 /* Like find_used_regs, but avoid recording uses that appear in
2685 input-output contexts such as zero_extract or pre_dec. This
2686 restricts the cases we consider to those for which local cprop
2687 can legitimately make replacements. */
2690 local_cprop_find_used_regs (rtx *xptr, void *data)
2697 switch (GET_CODE (x))
2701 case STRICT_LOW_PART:
2710 /* Can only legitimately appear this early in the context of
2711 stack pushes for function arguments, but handle all of the
2712 codes nonetheless. */
2716 /* Setting a subreg of a register larger than word_mode leaves
2717 the non-written words unchanged. */
2718 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2726 find_used_regs (xptr, data);
2729 /* Try to perform local const/copy propagation on X in INSN. */
2732 do_local_cprop (rtx x, rtx insn)
2734 rtx newreg = NULL, newcnst = NULL;
2736 /* Rule out USE instructions and ASM statements as we don't want to
2737 change the hard registers mentioned. */
2739 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2740 || (GET_CODE (PATTERN (insn)) != USE
2741 && asm_noperands (PATTERN (insn)) < 0)))
2743 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
2744 struct elt_loc_list *l;
2748 for (l = val->locs; l; l = l->next)
2750 rtx this_rtx = l->loc;
2753 if (gcse_constant_p (this_rtx))
2755 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
2756 /* Don't copy propagate if it has attached REG_EQUIV note.
2757 At this point this only function parameters should have
2758 REG_EQUIV notes and if the argument slot is used somewhere
2759 explicitly, it means address of parameter has been taken,
2760 so we should not extend the lifetime of the pseudo. */
2761 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2762 || ! MEM_P (XEXP (note, 0))))
2765 if (newcnst && constprop_register (insn, x, newcnst))
2767 if (dump_file != NULL)
2769 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2771 fprintf (dump_file, "insn %d with constant ",
2773 print_rtl (dump_file, newcnst);
2774 fprintf (dump_file, "\n");
2776 local_const_prop_count++;
2779 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2781 if (dump_file != NULL)
2784 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2785 REGNO (x), INSN_UID (insn));
2786 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2788 local_copy_prop_count++;
2795 /* Do local const/copy propagation (i.e. within each basic block). */
2798 local_cprop_pass (void)
2802 struct reg_use *reg_used;
2803 bool changed = false;
2808 FOR_BB_INSNS (bb, insn)
2812 rtx note = find_reg_equal_equiv_note (insn);
2816 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2819 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2821 for (reg_used = ®_use_table[0]; reg_use_count > 0;
2822 reg_used++, reg_use_count--)
2824 if (do_local_cprop (reg_used->reg_rtx, insn))
2830 if (INSN_DELETED_P (insn))
2833 while (reg_use_count);
2835 cselib_process_insn (insn);
2838 /* Forget everything at the end of a basic block. */
2839 cselib_clear_table ();
2847 /* Similar to get_condition, only the resulting condition must be
2848 valid at JUMP, instead of at EARLIEST.
2850 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2851 settle for the condition variable in the jump instruction being integral.
2852 We prefer to be able to record the value of a user variable, rather than
2853 the value of a temporary used in a condition. This could be solved by
2854 recording the value of *every* register scanned by canonicalize_condition,
2855 but this would require some code reorganization. */
2858 fis_get_condition (rtx jump)
2860 return get_condition (jump, NULL, false, true);
2863 /* Check the comparison COND to see if we can safely form an implicit set from
2864 it. COND is either an EQ or NE comparison. */
2867 implicit_set_cond_p (const_rtx cond)
2869 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2870 const_rtx cst = XEXP (cond, 1);
2872 /* We can't perform this optimization if either operand might be or might
2873 contain a signed zero. */
2874 if (HONOR_SIGNED_ZEROS (mode))
2876 /* It is sufficient to check if CST is or contains a zero. We must
2877 handle float, complex, and vector. If any subpart is a zero, then
2878 the optimization can't be performed. */
2879 /* ??? The complex and vector checks are not implemented yet. We just
2880 always return zero for them. */
2881 if (GET_CODE (cst) == CONST_DOUBLE)
2884 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2885 if (REAL_VALUES_EQUAL (d, dconst0))
2892 return gcse_constant_p (cst);
2895 /* Find the implicit sets of a function. An "implicit set" is a constraint
2896 on the value of a variable, implied by a conditional jump. For example,
2897 following "if (x == 2)", the then branch may be optimized as though the
2898 conditional performed an "explicit set", in this example, "x = 2". This
2899 function records the set patterns that are implicit at the start of each
2902 FIXME: This would be more effective if critical edges are pre-split. As
2903 it is now, we can't record implicit sets for blocks that have
2904 critical successor edges. This results in missed optimizations
2905 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
2908 find_implicit_sets (void)
2910 basic_block bb, dest;
2916 /* Check for more than one successor. */
2917 if (EDGE_COUNT (bb->succs) > 1)
2919 cond = fis_get_condition (BB_END (bb));
2922 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2923 && REG_P (XEXP (cond, 0))
2924 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2925 && implicit_set_cond_p (cond))
2927 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2928 : FALLTHRU_EDGE (bb)->dest;
2931 /* Record nothing for a critical edge. */
2932 && single_pred_p (dest)
2933 && dest != EXIT_BLOCK_PTR)
2935 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2937 implicit_sets[dest->index] = new_rtx;
2940 fprintf(dump_file, "Implicit set of reg %d in ",
2941 REGNO (XEXP (cond, 0)));
2942 fprintf(dump_file, "basic block %d\n", dest->index);
2950 fprintf (dump_file, "Found %d implicit sets\n", count);
2953 /* Bypass conditional jumps. */
2955 /* The value of last_basic_block at the beginning of the jump_bypass
2956 pass. The use of redirect_edge_and_branch_force may introduce new
2957 basic blocks, but the data flow analysis is only valid for basic
2958 block indices less than bypass_last_basic_block. */
2960 static int bypass_last_basic_block;
2962 /* Find a set of REGNO to a constant that is available at the end of basic
2963 block BB. Returns NULL if no such set is found. Based heavily upon
2966 static struct expr *
2967 find_bypass_set (int regno, int bb)
2969 struct expr *result = 0;
2974 struct expr *set = lookup_set (regno, &set_hash_table);
2978 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2980 set = next_set (regno, set);
2986 gcc_assert (GET_CODE (set->expr) == SET);
2988 src = SET_SRC (set->expr);
2989 if (gcse_constant_p (src))
2995 regno = REGNO (src);
3001 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3002 any of the instructions inserted on an edge. Jump bypassing places
3003 condition code setters on CFG edges using insert_insn_on_edge. This
3004 function is required to check that our data flow analysis is still
3005 valid prior to commit_edge_insertions. */
3008 reg_killed_on_edge (const_rtx reg, const_edge e)
3012 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3013 if (INSN_P (insn) && reg_set_p (reg, insn))
3019 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3020 basic block BB which has more than one predecessor. If not NULL, SETCC
3021 is the first instruction of BB, which is immediately followed by JUMP_INSN
3022 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3023 Returns nonzero if a change was made.
3025 During the jump bypassing pass, we may place copies of SETCC instructions
3026 on CFG edges. The following routine must be careful to pay attention to
3027 these inserted insns when performing its transformations. */
3030 bypass_block (basic_block bb, rtx setcc, rtx jump)
3035 int may_be_loop_header;
3039 insn = (setcc != NULL) ? setcc : jump;
3041 /* Determine set of register uses in INSN. */
3043 note_uses (&PATTERN (insn), find_used_regs, NULL);
3044 note = find_reg_equal_equiv_note (insn);
3046 find_used_regs (&XEXP (note, 0), NULL);
3048 may_be_loop_header = false;
3049 FOR_EACH_EDGE (e, ei, bb->preds)
3050 if (e->flags & EDGE_DFS_BACK)
3052 may_be_loop_header = true;
3057 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3061 if (e->flags & EDGE_COMPLEX)
3067 /* We can't redirect edges from new basic blocks. */
3068 if (e->src->index >= bypass_last_basic_block)
3074 /* The irreducible loops created by redirecting of edges entering the
3075 loop from outside would decrease effectiveness of some of the following
3076 optimizations, so prevent this. */
3077 if (may_be_loop_header
3078 && !(e->flags & EDGE_DFS_BACK))
3084 for (i = 0; i < reg_use_count; i++)
3086 struct reg_use *reg_used = ®_use_table[i];
3087 unsigned int regno = REGNO (reg_used->reg_rtx);
3088 basic_block dest, old_dest;
3092 set = find_bypass_set (regno, e->src->index);
3097 /* Check the data flow is valid after edge insertions. */
3098 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3101 src = SET_SRC (pc_set (jump));
3104 src = simplify_replace_rtx (src,
3105 SET_DEST (PATTERN (setcc)),
3106 SET_SRC (PATTERN (setcc)));
3108 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3109 SET_SRC (set->expr));
3111 /* Jump bypassing may have already placed instructions on
3112 edges of the CFG. We can't bypass an outgoing edge that
3113 has instructions associated with it, as these insns won't
3114 get executed if the incoming edge is redirected. */
3116 if (new_rtx == pc_rtx)
3118 edest = FALLTHRU_EDGE (bb);
3119 dest = edest->insns.r ? NULL : edest->dest;
3121 else if (GET_CODE (new_rtx) == LABEL_REF)
3123 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3124 /* Don't bypass edges containing instructions. */
3125 edest = find_edge (bb, dest);
3126 if (edest && edest->insns.r)
3132 /* Avoid unification of the edge with other edges from original
3133 branch. We would end up emitting the instruction on "both"
3136 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3137 && find_edge (e->src, dest))
3143 && dest != EXIT_BLOCK_PTR)
3145 redirect_edge_and_branch_force (e, dest);
3147 /* Copy the register setter to the redirected edge.
3148 Don't copy CC0 setters, as CC0 is dead after jump. */
3151 rtx pat = PATTERN (setcc);
3152 if (!CC0_P (SET_DEST (pat)))
3153 insert_insn_on_edge (copy_insn (pat), e);
3156 if (dump_file != NULL)
3158 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3159 "in jump_insn %d equals constant ",
3160 regno, INSN_UID (jump));
3161 print_rtl (dump_file, SET_SRC (set->expr));
3162 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3163 e->src->index, old_dest->index, dest->index);
3176 /* Find basic blocks with more than one predecessor that only contain a
3177 single conditional jump. If the result of the comparison is known at
3178 compile-time from any incoming edge, redirect that edge to the
3179 appropriate target. Returns nonzero if a change was made.
3181 This function is now mis-named, because we also handle indirect jumps. */
3184 bypass_conditional_jumps (void)
3192 /* Note we start at block 1. */
3193 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3196 bypass_last_basic_block = last_basic_block;
3197 mark_dfs_back_edges ();
3200 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3201 EXIT_BLOCK_PTR, next_bb)
3203 /* Check for more than one predecessor. */
3204 if (!single_pred_p (bb))
3207 FOR_BB_INSNS (bb, insn)
3208 if (DEBUG_INSN_P (insn))
3210 else if (NONJUMP_INSN_P (insn))
3214 if (GET_CODE (PATTERN (insn)) != SET)
3217 dest = SET_DEST (PATTERN (insn));
3218 if (REG_P (dest) || CC0_P (dest))
3223 else if (JUMP_P (insn))
3225 if ((any_condjump_p (insn) || computed_jump_p (insn))
3226 && onlyjump_p (insn))
3227 changed |= bypass_block (bb, setcc, insn);
3230 else if (INSN_P (insn))
3235 /* If we bypassed any register setting insns, we inserted a
3236 copy on the redirected edge. These need to be committed. */
3238 commit_edge_insertions ();
3243 /* Compute PRE+LCM working variables. */
3245 /* Local properties of expressions. */
3246 /* Nonzero for expressions that are transparent in the block. */
3247 static sbitmap *transp;
3249 /* Nonzero for expressions that are computed (available) in the block. */
3250 static sbitmap *comp;
3252 /* Nonzero for expressions that are locally anticipatable in the block. */
3253 static sbitmap *antloc;
3255 /* Nonzero for expressions where this block is an optimal computation
3257 static sbitmap *pre_optimal;
3259 /* Nonzero for expressions which are redundant in a particular block. */
3260 static sbitmap *pre_redundant;
3262 /* Nonzero for expressions which should be inserted on a specific edge. */
3263 static sbitmap *pre_insert_map;
3265 /* Nonzero for expressions which should be deleted in a specific block. */
3266 static sbitmap *pre_delete_map;
3268 /* Contains the edge_list returned by pre_edge_lcm. */
3269 static struct edge_list *edge_list;
3271 /* Allocate vars used for PRE analysis. */
3274 alloc_pre_mem (int n_blocks, int n_exprs)
3276 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3277 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3278 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3281 pre_redundant = NULL;
3282 pre_insert_map = NULL;
3283 pre_delete_map = NULL;
3284 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3286 /* pre_insert and pre_delete are allocated later. */
3289 /* Free vars used for PRE analysis. */
3294 sbitmap_vector_free (transp);
3295 sbitmap_vector_free (comp);
3297 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3300 sbitmap_vector_free (pre_optimal);
3302 sbitmap_vector_free (pre_redundant);
3304 sbitmap_vector_free (pre_insert_map);
3306 sbitmap_vector_free (pre_delete_map);
3308 transp = comp = NULL;
3309 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3312 /* Remove certain expressions from anticipatable and transparent
3313 sets of basic blocks that have incoming abnormal edge.
3314 For PRE remove potentially trapping expressions to avoid placing
3315 them on abnormal edges. For hoisting remove memory references that
3316 can be clobbered by calls. */
3319 prune_expressions (bool pre_p)
3321 sbitmap prune_exprs;
3325 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
3326 sbitmap_zero (prune_exprs);
3327 for (ui = 0; ui < expr_hash_table.size; ui++)
3330 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3332 /* Note potentially trapping expressions. */
3333 if (may_trap_p (e->expr))
3335 SET_BIT (prune_exprs, e->bitmap_index);
3339 if (!pre_p && MEM_P (e->expr))
3340 /* Note memory references that can be clobbered by a call.
3341 We do not split abnormal edges in hoisting, so would
3342 a memory reference get hoisted along an abnormal edge,
3343 it would be placed /before/ the call. Therefore, only
3344 constant memory references can be hoisted along abnormal
3347 if (GET_CODE (XEXP (e->expr, 0)) == SYMBOL_REF
3348 && CONSTANT_POOL_ADDRESS_P (XEXP (e->expr, 0)))
3351 if (MEM_READONLY_P (e->expr)
3352 && !MEM_VOLATILE_P (e->expr)
3353 && MEM_NOTRAP_P (e->expr))
3354 /* Constant memory reference, e.g., a PIC address. */
3357 /* ??? Optimally, we would use interprocedural alias
3358 analysis to determine if this mem is actually killed
3361 SET_BIT (prune_exprs, e->bitmap_index);
3371 /* If the current block is the destination of an abnormal edge, we
3372 kill all trapping (for PRE) and memory (for hoist) expressions
3373 because we won't be able to properly place the instruction on
3374 the edge. So make them neither anticipatable nor transparent.
3375 This is fairly conservative.
3377 ??? For hoisting it may be necessary to check for set-and-jump
3378 instructions here, not just for abnormal edges. The general problem
3379 is that when an expression cannot not be placed right at the end of
3380 a basic block we should account for any side-effects of a subsequent
3381 jump instructions that could clobber the expression. It would
3382 be best to implement this check along the lines of
3383 hoist_expr_reaches_here_p where the target block is already known
3384 and, hence, there's no need to conservatively prune expressions on
3385 "intermediate" set-and-jump instructions. */
3386 FOR_EACH_EDGE (e, ei, bb->preds)
3387 if ((e->flags & EDGE_ABNORMAL)
3388 && (pre_p || CALL_P (BB_END (e->src))))
3390 sbitmap_difference (antloc[bb->index],
3391 antloc[bb->index], prune_exprs);
3392 sbitmap_difference (transp[bb->index],
3393 transp[bb->index], prune_exprs);
3398 sbitmap_free (prune_exprs);
3401 /* Top level routine to do the dataflow analysis needed by PRE. */
3404 compute_pre_data (void)
3408 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3409 prune_expressions (true);
3410 sbitmap_vector_zero (ae_kill, last_basic_block);
3412 /* Compute ae_kill for each basic block using:
3419 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3420 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3423 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3424 ae_kill, &pre_insert_map, &pre_delete_map);
3425 sbitmap_vector_free (antloc);
3427 sbitmap_vector_free (ae_kill);
3433 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3436 VISITED is a pointer to a working buffer for tracking which BB's have
3437 been visited. It is NULL for the top-level call.
3439 We treat reaching expressions that go through blocks containing the same
3440 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3441 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3442 2 as not reaching. The intent is to improve the probability of finding
3443 only one reaching expression and to reduce register lifetimes by picking
3444 the closest such expression. */
3447 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3452 FOR_EACH_EDGE (pred, ei, bb->preds)
3454 basic_block pred_bb = pred->src;
3456 if (pred->src == ENTRY_BLOCK_PTR
3457 /* Has predecessor has already been visited? */
3458 || visited[pred_bb->index])
3459 ;/* Nothing to do. */
3461 /* Does this predecessor generate this expression? */
3462 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3464 /* Is this the occurrence we're looking for?
3465 Note that there's only one generating occurrence per block
3466 so we just need to check the block number. */
3467 if (occr_bb == pred_bb)
3470 visited[pred_bb->index] = 1;
3472 /* Ignore this predecessor if it kills the expression. */
3473 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3474 visited[pred_bb->index] = 1;
3476 /* Neither gen nor kill. */
3479 visited[pred_bb->index] = 1;
3480 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3485 /* All paths have been checked. */
3489 /* The wrapper for pre_expr_reaches_here_work that ensures that any
3490 memory allocated for that function is returned. */
3493 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3496 char *visited = XCNEWVEC (char, last_basic_block);
3498 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3505 /* Given an expr, generate RTL which we can insert at the end of a BB,
3506 or on an edge. Set the block number of any insns generated to
3510 process_insert_insn (struct expr *expr)
3512 rtx reg = expr->reaching_reg;
3513 rtx exp = copy_rtx (expr->expr);
3518 /* If the expression is something that's an operand, like a constant,
3519 just copy it to a register. */
3520 if (general_operand (exp, GET_MODE (reg)))
3521 emit_move_insn (reg, exp);
3523 /* Otherwise, make a new insn to compute this expression and make sure the
3524 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3525 expression to make sure we don't have any sharing issues. */
3528 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3530 if (insn_invalid_p (insn))
3541 /* Add EXPR to the end of basic block BB.
3543 This is used by both the PRE and code hoisting. */
3546 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
3548 rtx insn = BB_END (bb);
3550 rtx reg = expr->reaching_reg;
3551 int regno = REGNO (reg);
3554 pat = process_insert_insn (expr);
3555 gcc_assert (pat && INSN_P (pat));
3558 while (NEXT_INSN (pat_end) != NULL_RTX)
3559 pat_end = NEXT_INSN (pat_end);
3561 /* If the last insn is a jump, insert EXPR in front [taking care to
3562 handle cc0, etc. properly]. Similarly we need to care trapping
3563 instructions in presence of non-call exceptions. */
3566 || (NONJUMP_INSN_P (insn)
3567 && (!single_succ_p (bb)
3568 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3574 /* If this is a jump table, then we can't insert stuff here. Since
3575 we know the previous real insn must be the tablejump, we insert
3576 the new instruction just before the tablejump. */
3577 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3578 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3579 insn = prev_real_insn (insn);
3582 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3583 if cc0 isn't set. */
3584 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3586 insn = XEXP (note, 0);
3589 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3590 if (maybe_cc0_setter
3591 && INSN_P (maybe_cc0_setter)
3592 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3593 insn = maybe_cc0_setter;
3596 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3597 new_insn = emit_insn_before_noloc (pat, insn, bb);
3600 /* Likewise if the last insn is a call, as will happen in the presence
3601 of exception handling. */
3602 else if (CALL_P (insn)
3603 && (!single_succ_p (bb)
3604 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3606 /* Keeping in mind targets with small register classes and parameters
3607 in registers, we search backward and place the instructions before
3608 the first parameter is loaded. Do this for everyone for consistency
3609 and a presumption that we'll get better code elsewhere as well. */
3611 /* Since different machines initialize their parameter registers
3612 in different orders, assume nothing. Collect the set of all
3613 parameter registers. */
3614 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3616 /* If we found all the parameter loads, then we want to insert
3617 before the first parameter load.
3619 If we did not find all the parameter loads, then we might have
3620 stopped on the head of the block, which could be a CODE_LABEL.
3621 If we inserted before the CODE_LABEL, then we would be putting
3622 the insn in the wrong basic block. In that case, put the insn
3623 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3624 while (LABEL_P (insn)
3625 || NOTE_INSN_BASIC_BLOCK_P (insn))
3626 insn = NEXT_INSN (insn);
3628 new_insn = emit_insn_before_noloc (pat, insn, bb);
3631 new_insn = emit_insn_after_noloc (pat, insn, bb);
3636 add_label_notes (PATTERN (pat), new_insn);
3639 pat = NEXT_INSN (pat);
3642 gcse_create_count++;
3646 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3647 bb->index, INSN_UID (new_insn));
3648 fprintf (dump_file, "copying expression %d to reg %d\n",
3649 expr->bitmap_index, regno);
3653 /* Insert partially redundant expressions on edges in the CFG to make
3654 the expressions fully redundant. */
3657 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3659 int e, i, j, num_edges, set_size, did_insert = 0;
3662 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3663 if it reaches any of the deleted expressions. */
3665 set_size = pre_insert_map[0]->size;
3666 num_edges = NUM_EDGES (edge_list);
3667 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3668 sbitmap_vector_zero (inserted, num_edges);
3670 for (e = 0; e < num_edges; e++)
3673 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3675 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3677 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3679 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3680 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3682 struct expr *expr = index_map[j];
3685 /* Now look at each deleted occurrence of this expression. */
3686 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3688 if (! occr->deleted_p)
3691 /* Insert this expression on this edge if it would
3692 reach the deleted occurrence in BB. */
3693 if (!TEST_BIT (inserted[e], j))
3696 edge eg = INDEX_EDGE (edge_list, e);
3698 /* We can't insert anything on an abnormal and
3699 critical edge, so we insert the insn at the end of
3700 the previous block. There are several alternatives
3701 detailed in Morgans book P277 (sec 10.5) for
3702 handling this situation. This one is easiest for
3705 if (eg->flags & EDGE_ABNORMAL)
3706 insert_insn_end_basic_block (index_map[j], bb);
3709 insn = process_insert_insn (index_map[j]);
3710 insert_insn_on_edge (insn, eg);
3715 fprintf (dump_file, "PRE: edge (%d,%d), ",
3717 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3718 fprintf (dump_file, "copy expression %d\n",
3719 expr->bitmap_index);
3722 update_ld_motion_stores (expr);
3723 SET_BIT (inserted[e], j);
3725 gcse_create_count++;
3732 sbitmap_vector_free (inserted);
3736 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3737 Given "old_reg <- expr" (INSN), instead of adding after it
3738 reaching_reg <- old_reg
3739 it's better to do the following:
3740 reaching_reg <- expr
3741 old_reg <- reaching_reg
3742 because this way copy propagation can discover additional PRE
3743 opportunities. But if this fails, we try the old way.
3744 When "expr" is a store, i.e.
3745 given "MEM <- old_reg", instead of adding after it
3746 reaching_reg <- old_reg
3747 it's better to add it before as follows:
3748 reaching_reg <- old_reg
3749 MEM <- reaching_reg. */
3752 pre_insert_copy_insn (struct expr *expr, rtx insn)
3754 rtx reg = expr->reaching_reg;
3755 int regno = REGNO (reg);
3756 int indx = expr->bitmap_index;
3757 rtx pat = PATTERN (insn);
3758 rtx set, first_set, new_insn;
3762 /* This block matches the logic in hash_scan_insn. */
3763 switch (GET_CODE (pat))
3770 /* Search through the parallel looking for the set whose
3771 source was the expression that we're interested in. */
3772 first_set = NULL_RTX;
3774 for (i = 0; i < XVECLEN (pat, 0); i++)
3776 rtx x = XVECEXP (pat, 0, i);
3777 if (GET_CODE (x) == SET)
3779 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3780 may not find an equivalent expression, but in this
3781 case the PARALLEL will have a single set. */
3782 if (first_set == NULL_RTX)
3784 if (expr_equiv_p (SET_SRC (x), expr->expr))
3792 gcc_assert (first_set);
3793 if (set == NULL_RTX)
3801 if (REG_P (SET_DEST (set)))
3803 old_reg = SET_DEST (set);
3804 /* Check if we can modify the set destination in the original insn. */
3805 if (validate_change (insn, &SET_DEST (set), reg, 0))
3807 new_insn = gen_move_insn (old_reg, reg);
3808 new_insn = emit_insn_after (new_insn, insn);
3812 new_insn = gen_move_insn (reg, old_reg);
3813 new_insn = emit_insn_after (new_insn, insn);
3816 else /* This is possible only in case of a store to memory. */
3818 old_reg = SET_SRC (set);
3819 new_insn = gen_move_insn (reg, old_reg);
3821 /* Check if we can modify the set source in the original insn. */
3822 if (validate_change (insn, &SET_SRC (set), reg, 0))
3823 new_insn = emit_insn_before (new_insn, insn);
3825 new_insn = emit_insn_after (new_insn, insn);
3828 gcse_create_count++;
3832 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3833 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
3834 INSN_UID (insn), regno);
3837 /* Copy available expressions that reach the redundant expression
3838 to `reaching_reg'. */
3841 pre_insert_copies (void)
3843 unsigned int i, added_copy;
3848 /* For each available expression in the table, copy the result to
3849 `reaching_reg' if the expression reaches a deleted one.
3851 ??? The current algorithm is rather brute force.
3852 Need to do some profiling. */
3854 for (i = 0; i < expr_hash_table.size; i++)
3855 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3857 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3858 we don't want to insert a copy here because the expression may not
3859 really be redundant. So only insert an insn if the expression was
3860 deleted. This test also avoids further processing if the
3861 expression wasn't deleted anywhere. */
3862 if (expr->reaching_reg == NULL)
3865 /* Set when we add a copy for that expression. */
3868 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3870 if (! occr->deleted_p)
3873 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3875 rtx insn = avail->insn;
3877 /* No need to handle this one if handled already. */
3878 if (avail->copied_p)
3881 /* Don't handle this one if it's a redundant one. */
3882 if (INSN_DELETED_P (insn))
3885 /* Or if the expression doesn't reach the deleted one. */
3886 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3888 BLOCK_FOR_INSN (occr->insn)))
3893 /* Copy the result of avail to reaching_reg. */
3894 pre_insert_copy_insn (expr, insn);
3895 avail->copied_p = 1;
3900 update_ld_motion_stores (expr);
3904 /* Emit move from SRC to DEST noting the equivalence with expression computed
3907 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3910 rtx set = single_set (insn), set2;
3914 /* This should never fail since we're creating a reg->reg copy
3915 we've verified to be valid. */
3917 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3919 /* Note the equivalence for local CSE pass. */
3920 set2 = single_set (new_rtx);
3921 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3923 if ((note = find_reg_equal_equiv_note (insn)))
3924 eqv = XEXP (note, 0);
3926 eqv = SET_SRC (set);
3928 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
3933 /* Delete redundant computations.
3934 Deletion is done by changing the insn to copy the `reaching_reg' of
3935 the expression into the result of the SET. It is left to later passes
3936 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
3938 Returns nonzero if a change is made. */
3949 for (i = 0; i < expr_hash_table.size; i++)
3950 for (expr = expr_hash_table.table[i];
3952 expr = expr->next_same_hash)
3954 int indx = expr->bitmap_index;
3956 /* We only need to search antic_occr since we require
3959 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3961 rtx insn = occr->insn;
3963 basic_block bb = BLOCK_FOR_INSN (insn);
3965 /* We only delete insns that have a single_set. */
3966 if (TEST_BIT (pre_delete_map[bb->index], indx)
3967 && (set = single_set (insn)) != 0
3968 && dbg_cnt (pre_insn))
3970 /* Create a pseudo-reg to store the result of reaching
3971 expressions into. Get the mode for the new pseudo from
3972 the mode of the original destination pseudo. */
3973 if (expr->reaching_reg == NULL)
3974 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
3976 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
3978 occr->deleted_p = 1;
3985 "PRE: redundant insn %d (expression %d) in ",
3986 INSN_UID (insn), indx);
3987 fprintf (dump_file, "bb %d, reaching reg is %d\n",
3988 bb->index, REGNO (expr->reaching_reg));
3997 /* Perform GCSE optimizations using PRE.
3998 This is called by one_pre_gcse_pass after all the dataflow analysis
4001 This is based on the original Morel-Renvoise paper Fred Chow's thesis, and
4002 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4003 Compiler Design and Implementation.
4005 ??? A new pseudo reg is created to hold the reaching expression. The nice
4006 thing about the classical approach is that it would try to use an existing
4007 reg. If the register can't be adequately optimized [i.e. we introduce
4008 reload problems], one could add a pass here to propagate the new register
4011 ??? We don't handle single sets in PARALLELs because we're [currently] not
4012 able to copy the rest of the parallel when we insert copies to create full
4013 redundancies from partial redundancies. However, there's no reason why we
4014 can't handle PARALLELs in the cases where there are no partial
4021 int did_insert, changed;
4022 struct expr **index_map;
4025 /* Compute a mapping from expression number (`bitmap_index') to
4026 hash table entry. */
4028 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4029 for (i = 0; i < expr_hash_table.size; i++)
4030 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4031 index_map[expr->bitmap_index] = expr;
4033 /* Delete the redundant insns first so that
4034 - we know what register to use for the new insns and for the other
4035 ones with reaching expressions
4036 - we know which insns are redundant when we go to create copies */
4038 changed = pre_delete ();
4039 did_insert = pre_edge_insert (edge_list, index_map);
4041 /* In other places with reaching expressions, copy the expression to the
4042 specially allocated pseudo-reg that reaches the redundant expr. */
4043 pre_insert_copies ();
4046 commit_edge_insertions ();
4054 /* Top level routine to perform one PRE GCSE pass.
4056 Return nonzero if a change was made. */
4059 one_pre_gcse_pass (void)
4063 gcse_subst_count = 0;
4064 gcse_create_count = 0;
4066 /* Return if there's nothing to do, or it is too expensive. */
4067 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4068 || is_too_expensive (_("PRE disabled")))
4071 /* We need alias. */
4072 init_alias_analysis ();
4075 gcc_obstack_init (&gcse_obstack);
4078 alloc_hash_table (&expr_hash_table, 0);
4079 add_noreturn_fake_exit_edges ();
4081 compute_ld_motion_mems ();
4083 compute_hash_table (&expr_hash_table);
4084 trim_ld_motion_mems ();
4086 dump_hash_table (dump_file, "Expression", &expr_hash_table);
4088 if (expr_hash_table.n_elems > 0)
4090 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4091 compute_pre_data ();
4092 changed |= pre_gcse ();
4093 free_edge_list (edge_list);
4098 remove_fake_exit_edges ();
4099 free_hash_table (&expr_hash_table);
4102 obstack_free (&gcse_obstack, NULL);
4104 /* We are finished with alias. */
4105 end_alias_analysis ();
4109 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4110 current_function_name (), n_basic_blocks, bytes_used);
4111 fprintf (dump_file, "%d substs, %d insns created\n",
4112 gcse_subst_count, gcse_create_count);
4118 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4119 to INSN. If such notes are added to an insn which references a
4120 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4121 that note, because the following loop optimization pass requires
4124 /* ??? If there was a jump optimization pass after gcse and before loop,
4125 then we would not need to do this here, because jump would add the
4126 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4129 add_label_notes (rtx x, rtx insn)
4131 enum rtx_code code = GET_CODE (x);
4135 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4137 /* This code used to ignore labels that referred to dispatch tables to
4138 avoid flow generating (slightly) worse code.
4140 We no longer ignore such label references (see LABEL_REF handling in
4141 mark_jump_label for additional information). */
4143 /* There's no reason for current users to emit jump-insns with
4144 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
4146 gcc_assert (!JUMP_P (insn));
4147 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4149 if (LABEL_P (XEXP (x, 0)))
4150 LABEL_NUSES (XEXP (x, 0))++;
4155 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4158 add_label_notes (XEXP (x, i), insn);
4159 else if (fmt[i] == 'E')
4160 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4161 add_label_notes (XVECEXP (x, i, j), insn);
4165 /* Code Hoisting variables and subroutines. */
4167 /* Very busy expressions. */
4168 static sbitmap *hoist_vbein;
4169 static sbitmap *hoist_vbeout;
4171 /* ??? We could compute post dominators and run this algorithm in
4172 reverse to perform tail merging, doing so would probably be
4173 more effective than the tail merging code in jump.c.
4175 It's unclear if tail merging could be run in parallel with
4176 code hoisting. It would be nice. */
4178 /* Allocate vars used for code hoisting analysis. */
4181 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4183 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4184 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4185 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4187 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4188 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4191 /* Free vars used for code hoisting analysis. */
4194 free_code_hoist_mem (void)
4196 sbitmap_vector_free (antloc);
4197 sbitmap_vector_free (transp);
4198 sbitmap_vector_free (comp);
4200 sbitmap_vector_free (hoist_vbein);
4201 sbitmap_vector_free (hoist_vbeout);
4203 free_dominance_info (CDI_DOMINATORS);
4206 /* Compute the very busy expressions at entry/exit from each block.
4208 An expression is very busy if all paths from a given point
4209 compute the expression. */
4212 compute_code_hoist_vbeinout (void)
4214 int changed, passes;
4217 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4218 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4227 /* We scan the blocks in the reverse order to speed up
4229 FOR_EACH_BB_REVERSE (bb)
4231 if (bb->next_bb != EXIT_BLOCK_PTR)
4233 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
4234 hoist_vbein, bb->index);
4236 /* Include expressions in VBEout that are calculated
4237 in BB and available at its end. */
4238 sbitmap_a_or_b (hoist_vbeout[bb->index],
4239 hoist_vbeout[bb->index], comp[bb->index]);
4242 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
4244 hoist_vbeout[bb->index],
4253 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4257 fprintf (dump_file, "vbein (%d): ", bb->index);
4258 dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
4259 fprintf (dump_file, "vbeout(%d): ", bb->index);
4260 dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
4265 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4268 compute_code_hoist_data (void)
4270 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4271 prune_expressions (false);
4272 compute_code_hoist_vbeinout ();
4273 calculate_dominance_info (CDI_DOMINATORS);
4275 fprintf (dump_file, "\n");
4278 /* Determine if the expression identified by EXPR_INDEX would
4279 reach BB unimpared if it was placed at the end of EXPR_BB.
4280 Stop the search if the expression would need to be moved more
4281 than DISTANCE instructions.
4283 It's unclear exactly what Muchnick meant by "unimpared". It seems
4284 to me that the expression must either be computed or transparent in
4285 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4286 would allow the expression to be hoisted out of loops, even if
4287 the expression wasn't a loop invariant.
4289 Contrast this to reachability for PRE where an expression is
4290 considered reachable if *any* path reaches instead of *all*
4294 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb,
4295 char *visited, int distance, int *bb_size)
4299 int visited_allocated_locally = 0;
4301 /* Terminate the search if distance, for which EXPR is allowed to move,
4305 distance -= bb_size[bb->index];
4311 gcc_assert (distance == 0);
4313 if (visited == NULL)
4315 visited_allocated_locally = 1;
4316 visited = XCNEWVEC (char, last_basic_block);
4319 FOR_EACH_EDGE (pred, ei, bb->preds)
4321 basic_block pred_bb = pred->src;
4323 if (pred->src == ENTRY_BLOCK_PTR)
4325 else if (pred_bb == expr_bb)
4327 else if (visited[pred_bb->index])
4330 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4336 visited[pred_bb->index] = 1;
4337 if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb,
4338 visited, distance, bb_size))
4342 if (visited_allocated_locally)
4345 return (pred == NULL);
4348 /* Find occurence in BB. */
4349 static struct occr *
4350 find_occr_in_bb (struct occr *occr, basic_block bb)
4352 /* Find the right occurrence of this expression. */
4353 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
4359 /* Actually perform code hoisting. */
4364 basic_block bb, dominated;
4365 VEC (basic_block, heap) *dom_tree_walk;
4366 unsigned int dom_tree_walk_index;
4367 VEC (basic_block, heap) *domby;
4369 struct expr **index_map;
4375 /* Compute a mapping from expression number (`bitmap_index') to
4376 hash table entry. */
4378 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4379 for (i = 0; i < expr_hash_table.size; i++)
4380 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4381 index_map[expr->bitmap_index] = expr;
4383 /* Calculate sizes of basic blocks and note how far
4384 each instruction is from the start of its block. We then use this
4385 data to restrict distance an expression can travel. */
4387 to_bb_head = XCNEWVEC (int, get_max_uid ());
4388 bb_size = XCNEWVEC (int, last_basic_block);
4396 insn = BB_HEAD (bb);
4397 bb_end = BB_END (bb);
4400 while (insn != bb_end)
4402 /* Don't count debug instructions to avoid them affecting
4403 decision choices. */
4404 if (NONDEBUG_INSN_P (insn))
4405 to_bb_head[INSN_UID (insn)] = to_head++;
4407 insn = NEXT_INSN (insn);
4410 bb_size[bb->index] = to_head;
4413 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
4414 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
4415 == ENTRY_BLOCK_PTR->next_bb));
4417 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
4418 ENTRY_BLOCK_PTR->next_bb);
4420 /* Walk over each basic block looking for potentially hoistable
4421 expressions, nothing gets hoisted from the entry block. */
4422 for (dom_tree_walk_index = 0;
4423 VEC_iterate (basic_block, dom_tree_walk, dom_tree_walk_index, bb);
4424 dom_tree_walk_index++)
4426 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
4428 if (VEC_length (basic_block, domby) == 0)
4431 /* Examine each expression that is very busy at the exit of this
4432 block. These are the potentially hoistable expressions. */
4433 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4435 if (TEST_BIT (hoist_vbeout[bb->index], i))
4437 /* Current expression. */
4438 struct expr *expr = index_map[i];
4439 /* Number of occurences of EXPR that can be hoisted to BB. */
4441 /* Basic blocks that have occurences reachable from BB. */
4442 bitmap_head _from_bbs, *from_bbs = &_from_bbs;
4443 /* Occurences reachable from BB. */
4444 VEC (occr_t, heap) *occrs_to_hoist = NULL;
4445 /* We want to insert the expression into BB only once, so
4446 note when we've inserted it. */
4447 int insn_inserted_p;
4450 bitmap_initialize (from_bbs, 0);
4452 /* If an expression is computed in BB and is available at end of
4453 BB, hoist all occurences dominated by BB to BB. */
4454 if (TEST_BIT (comp[bb->index], i))
4456 occr = find_occr_in_bb (expr->antic_occr, bb);
4460 /* An occurence might've been already deleted
4461 while processing a dominator of BB. */
4462 if (occr->deleted_p)
4463 gcc_assert (MAX_HOIST_DEPTH > 1);
4466 gcc_assert (NONDEBUG_INSN_P (occr->insn));
4474 /* We've found a potentially hoistable expression, now
4475 we look at every block BB dominates to see if it
4476 computes the expression. */
4477 for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
4481 /* Ignore self dominance. */
4482 if (bb == dominated)
4484 /* We've found a dominated block, now see if it computes
4485 the busy expression and whether or not moving that
4486 expression to the "beginning" of that block is safe. */
4487 if (!TEST_BIT (antloc[dominated->index], i))
4490 occr = find_occr_in_bb (expr->antic_occr, dominated);
4493 /* An occurence might've been already deleted
4494 while processing a dominator of BB. */
4495 if (occr->deleted_p)
4497 gcc_assert (MAX_HOIST_DEPTH > 1);
4500 gcc_assert (NONDEBUG_INSN_P (occr->insn));
4502 max_distance = expr->max_distance;
4503 if (max_distance > 0)
4504 /* Adjust MAX_DISTANCE to account for the fact that
4505 OCCR won't have to travel all of DOMINATED, but
4507 max_distance += (bb_size[dominated->index]
4508 - to_bb_head[INSN_UID (occr->insn)]);
4510 /* Note if the expression would reach the dominated block
4511 unimpared if it was placed at the end of BB.
4513 Keep track of how many times this expression is hoistable
4514 from a dominated block into BB. */
4515 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL,
4516 max_distance, bb_size))
4519 VEC_safe_push (occr_t, heap,
4520 occrs_to_hoist, occr);
4521 bitmap_set_bit (from_bbs, dominated->index);
4525 /* If we found more than one hoistable occurrence of this
4526 expression, then note it in the vector of expressions to
4527 hoist. It makes no sense to hoist things which are computed
4528 in only one BB, and doing so tends to pessimize register
4529 allocation. One could increase this value to try harder
4530 to avoid any possible code expansion due to register
4531 allocation issues; however experiments have shown that
4532 the vast majority of hoistable expressions are only movable
4533 from two successors, so raising this threshold is likely
4534 to nullify any benefit we get from code hoisting. */
4535 if (hoistable > 1 && dbg_cnt (hoist_insn))
4537 /* If (hoistable != VEC_length), then there is
4538 an occurence of EXPR in BB itself. Don't waste
4539 time looking for LCA in this case. */
4540 if ((unsigned) hoistable
4541 == VEC_length (occr_t, occrs_to_hoist))
4545 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
4548 /* Punt, it's better to hoist these occurences to
4550 VEC_free (occr_t, heap, occrs_to_hoist);
4554 /* Punt, no point hoisting a single occurence. */
4555 VEC_free (occr_t, heap, occrs_to_hoist);
4557 insn_inserted_p = 0;
4559 /* Walk through occurences of I'th expressions we want
4560 to hoist to BB and make the transformations. */
4562 VEC_iterate (occr_t, occrs_to_hoist, j, occr);
4568 gcc_assert (!occr->deleted_p);
4571 set = single_set (insn);
4574 /* Create a pseudo-reg to store the result of reaching
4575 expressions into. Get the mode for the new pseudo
4576 from the mode of the original destination pseudo.
4578 It is important to use new pseudos whenever we
4579 emit a set. This will allow reload to use
4580 rematerialization for such registers. */
4581 if (!insn_inserted_p)
4583 = gen_reg_rtx_and_attrs (SET_DEST (set));
4585 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set),
4588 occr->deleted_p = 1;
4592 if (!insn_inserted_p)
4594 insert_insn_end_basic_block (expr, bb);
4595 insn_inserted_p = 1;
4599 VEC_free (occr_t, heap, occrs_to_hoist);
4600 bitmap_clear (from_bbs);
4603 VEC_free (basic_block, heap, domby);
4606 VEC_free (basic_block, heap, dom_tree_walk);
4614 /* Top level routine to perform one code hoisting (aka unification) pass
4616 Return nonzero if a change was made. */
4619 one_code_hoisting_pass (void)
4623 gcse_subst_count = 0;
4624 gcse_create_count = 0;
4626 /* Return if there's nothing to do, or it is too expensive. */
4627 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4628 || is_too_expensive (_("GCSE disabled")))
4631 doing_code_hoisting_p = true;
4633 /* We need alias. */
4634 init_alias_analysis ();
4637 gcc_obstack_init (&gcse_obstack);
4640 alloc_hash_table (&expr_hash_table, 0);
4641 compute_hash_table (&expr_hash_table);
4643 dump_hash_table (dump_file, "Code Hosting Expressions", &expr_hash_table);
4645 if (expr_hash_table.n_elems > 0)
4647 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4648 compute_code_hoist_data ();
4649 changed = hoist_code ();
4650 free_code_hoist_mem ();
4653 free_hash_table (&expr_hash_table);
4655 obstack_free (&gcse_obstack, NULL);
4657 /* We are finished with alias. */
4658 end_alias_analysis ();
4662 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
4663 current_function_name (), n_basic_blocks, bytes_used);
4664 fprintf (dump_file, "%d substs, %d insns created\n",
4665 gcse_subst_count, gcse_create_count);
4668 doing_code_hoisting_p = false;
4673 /* Here we provide the things required to do store motion towards
4674 the exit. In order for this to be effective, gcse also needed to
4675 be taught how to move a load when it is kill only by a store to itself.
4680 void foo(float scale)
4682 for (i=0; i<10; i++)
4686 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
4687 the load out since its live around the loop, and stored at the bottom
4690 The 'Load Motion' referred to and implemented in this file is
4691 an enhancement to gcse which when using edge based lcm, recognizes
4692 this situation and allows gcse to move the load out of the loop.
4694 Once gcse has hoisted the load, store motion can then push this
4695 load towards the exit, and we end up with no loads or stores of 'i'
4699 pre_ldst_expr_hash (const void *p)
4701 int do_not_record_p = 0;
4702 const struct ls_expr *const x = (const struct ls_expr *) p;
4703 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
4707 pre_ldst_expr_eq (const void *p1, const void *p2)
4709 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
4710 *const ptr2 = (const struct ls_expr *) p2;
4711 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
4714 /* This will search the ldst list for a matching expression. If it
4715 doesn't find one, we create one and initialize it. */
4717 static struct ls_expr *
4720 int do_not_record_p = 0;
4721 struct ls_expr * ptr;
4726 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
4727 NULL, /*have_reg_qty=*/false);
4730 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
4732 return (struct ls_expr *)*slot;
4734 ptr = XNEW (struct ls_expr);
4736 ptr->next = pre_ldst_mems;
4739 ptr->pattern_regs = NULL_RTX;
4740 ptr->loads = NULL_RTX;
4741 ptr->stores = NULL_RTX;
4742 ptr->reaching_reg = NULL_RTX;
4745 ptr->hash_index = hash;
4746 pre_ldst_mems = ptr;
4752 /* Free up an individual ldst entry. */
4755 free_ldst_entry (struct ls_expr * ptr)
4757 free_INSN_LIST_list (& ptr->loads);
4758 free_INSN_LIST_list (& ptr->stores);
4763 /* Free up all memory associated with the ldst list. */
4766 free_ldst_mems (void)
4769 htab_delete (pre_ldst_table);
4770 pre_ldst_table = NULL;
4772 while (pre_ldst_mems)
4774 struct ls_expr * tmp = pre_ldst_mems;
4776 pre_ldst_mems = pre_ldst_mems->next;
4778 free_ldst_entry (tmp);
4781 pre_ldst_mems = NULL;
4784 /* Dump debugging info about the ldst list. */
4787 print_ldst_list (FILE * file)
4789 struct ls_expr * ptr;
4791 fprintf (file, "LDST list: \n");
4793 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
4795 fprintf (file, " Pattern (%3d): ", ptr->index);
4797 print_rtl (file, ptr->pattern);
4799 fprintf (file, "\n Loads : ");
4802 print_rtl (file, ptr->loads);
4804 fprintf (file, "(nil)");
4806 fprintf (file, "\n Stores : ");
4809 print_rtl (file, ptr->stores);
4811 fprintf (file, "(nil)");
4813 fprintf (file, "\n\n");
4816 fprintf (file, "\n");
4819 /* Returns 1 if X is in the list of ldst only expressions. */
4821 static struct ls_expr *
4822 find_rtx_in_ldst (rtx x)
4826 if (!pre_ldst_table)
4829 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
4830 if (!slot || ((struct ls_expr *)*slot)->invalid)
4832 return (struct ls_expr *) *slot;
4835 /* Return first item in the list. */
4837 static inline struct ls_expr *
4838 first_ls_expr (void)
4840 return pre_ldst_mems;
4843 /* Return the next item in the list after the specified one. */
4845 static inline struct ls_expr *
4846 next_ls_expr (struct ls_expr * ptr)
4851 /* Load Motion for loads which only kill themselves. */
4853 /* Return true if x is a simple MEM operation, with no registers or
4854 side effects. These are the types of loads we consider for the
4855 ld_motion list, otherwise we let the usual aliasing take care of it. */
4858 simple_mem (const_rtx x)
4863 if (MEM_VOLATILE_P (x))
4866 if (GET_MODE (x) == BLKmode)
4869 /* If we are handling exceptions, we must be careful with memory references
4870 that may trap. If we are not, the behavior is undefined, so we may just
4872 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
4875 if (side_effects_p (x))
4878 /* Do not consider function arguments passed on stack. */
4879 if (reg_mentioned_p (stack_pointer_rtx, x))
4882 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
4888 /* Make sure there isn't a buried reference in this pattern anywhere.
4889 If there is, invalidate the entry for it since we're not capable
4890 of fixing it up just yet.. We have to be sure we know about ALL
4891 loads since the aliasing code will allow all entries in the
4892 ld_motion list to not-alias itself. If we miss a load, we will get
4893 the wrong value since gcse might common it and we won't know to
4897 invalidate_any_buried_refs (rtx x)
4901 struct ls_expr * ptr;
4903 /* Invalidate it in the list. */
4904 if (MEM_P (x) && simple_mem (x))
4906 ptr = ldst_entry (x);
4910 /* Recursively process the insn. */
4911 fmt = GET_RTX_FORMAT (GET_CODE (x));
4913 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4916 invalidate_any_buried_refs (XEXP (x, i));
4917 else if (fmt[i] == 'E')
4918 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4919 invalidate_any_buried_refs (XVECEXP (x, i, j));
4923 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
4924 being defined as MEM loads and stores to symbols, with no side effects
4925 and no registers in the expression. For a MEM destination, we also
4926 check that the insn is still valid if we replace the destination with a
4927 REG, as is done in update_ld_motion_stores. If there are any uses/defs
4928 which don't match this criteria, they are invalidated and trimmed out
4932 compute_ld_motion_mems (void)
4934 struct ls_expr * ptr;
4938 pre_ldst_mems = NULL;
4939 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
4940 pre_ldst_expr_eq, NULL);
4944 FOR_BB_INSNS (bb, insn)
4946 if (NONDEBUG_INSN_P (insn))
4948 if (GET_CODE (PATTERN (insn)) == SET)
4950 rtx src = SET_SRC (PATTERN (insn));
4951 rtx dest = SET_DEST (PATTERN (insn));
4953 /* Check for a simple LOAD... */
4954 if (MEM_P (src) && simple_mem (src))
4956 ptr = ldst_entry (src);
4958 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
4964 /* Make sure there isn't a buried load somewhere. */
4965 invalidate_any_buried_refs (src);
4968 /* Check for stores. Don't worry about aliased ones, they
4969 will block any movement we might do later. We only care
4970 about this exact pattern since those are the only
4971 circumstance that we will ignore the aliasing info. */
4972 if (MEM_P (dest) && simple_mem (dest))
4974 ptr = ldst_entry (dest);
4977 && GET_CODE (src) != ASM_OPERANDS
4978 /* Check for REG manually since want_to_gcse_p
4979 returns 0 for all REGs. */
4980 && can_assign_to_reg_without_clobbers_p (src))
4981 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
4987 invalidate_any_buried_refs (PATTERN (insn));
4993 /* Remove any references that have been either invalidated or are not in the
4994 expression list for pre gcse. */
4997 trim_ld_motion_mems (void)
4999 struct ls_expr * * last = & pre_ldst_mems;
5000 struct ls_expr * ptr = pre_ldst_mems;
5006 /* Delete if entry has been made invalid. */
5009 /* Delete if we cannot find this mem in the expression list. */
5010 unsigned int hash = ptr->hash_index % expr_hash_table.size;
5012 for (expr = expr_hash_table.table[hash];
5014 expr = expr->next_same_hash)
5015 if (expr_equiv_p (expr->expr, ptr->pattern))
5019 expr = (struct expr *) 0;
5023 /* Set the expression field if we are keeping it. */
5031 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5032 free_ldst_entry (ptr);
5037 /* Show the world what we've found. */
5038 if (dump_file && pre_ldst_mems != NULL)
5039 print_ldst_list (dump_file);
5042 /* This routine will take an expression which we are replacing with
5043 a reaching register, and update any stores that are needed if
5044 that expression is in the ld_motion list. Stores are updated by
5045 copying their SRC to the reaching register, and then storing
5046 the reaching register into the store location. These keeps the
5047 correct value in the reaching register for the loads. */
5050 update_ld_motion_stores (struct expr * expr)
5052 struct ls_expr * mem_ptr;
5054 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
5056 /* We can try to find just the REACHED stores, but is shouldn't
5057 matter to set the reaching reg everywhere... some might be
5058 dead and should be eliminated later. */
5060 /* We replace (set mem expr) with (set reg expr) (set mem reg)
5061 where reg is the reaching reg used in the load. We checked in
5062 compute_ld_motion_mems that we can replace (set mem expr) with
5063 (set reg expr) in that insn. */
5064 rtx list = mem_ptr->stores;
5066 for ( ; list != NULL_RTX; list = XEXP (list, 1))
5068 rtx insn = XEXP (list, 0);
5069 rtx pat = PATTERN (insn);
5070 rtx src = SET_SRC (pat);
5071 rtx reg = expr->reaching_reg;
5074 /* If we've already copied it, continue. */
5075 if (expr->reaching_reg == src)
5080 fprintf (dump_file, "PRE: store updated with reaching reg ");
5081 print_rtl (dump_file, expr->reaching_reg);
5082 fprintf (dump_file, ":\n ");
5083 print_inline_rtx (dump_file, insn, 8);
5084 fprintf (dump_file, "\n");
5087 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
5088 emit_insn_before (copy, insn);
5089 SET_SRC (pat) = reg;
5090 df_insn_rescan (insn);
5092 /* un-recognize this pattern since it's probably different now. */
5093 INSN_CODE (insn) = -1;
5094 gcse_create_count++;
5099 /* Return true if the graph is too expensive to optimize. PASS is the
5100 optimization about to be performed. */
5103 is_too_expensive (const char *pass)
5105 /* Trying to perform global optimizations on flow graphs which have
5106 a high connectivity will take a long time and is unlikely to be
5107 particularly useful.
5109 In normal circumstances a cfg should have about twice as many
5110 edges as blocks. But we do not want to punish small functions
5111 which have a couple switch statements. Rather than simply
5112 threshold the number of blocks, uses something with a more
5113 graceful degradation. */
5114 if (n_edges > 20000 + n_basic_blocks * 4)
5116 warning (OPT_Wdisabled_optimization,
5117 "%s: %d basic blocks and %d edges/basic block",
5118 pass, n_basic_blocks, n_edges / n_basic_blocks);
5123 /* If allocating memory for the cprop bitmap would take up too much
5124 storage it's better just to disable the optimization. */
5126 * SBITMAP_SET_SIZE (max_reg_num ())
5127 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
5129 warning (OPT_Wdisabled_optimization,
5130 "%s: %d basic blocks and %d registers",
5131 pass, n_basic_blocks, max_reg_num ());
5140 /* Main function for the CPROP pass. */
5143 one_cprop_pass (void)
5147 /* Return if there's nothing to do, or it is too expensive. */
5148 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
5149 || is_too_expensive (_ ("const/copy propagation disabled")))
5152 global_const_prop_count = local_const_prop_count = 0;
5153 global_copy_prop_count = local_copy_prop_count = 0;
5156 gcc_obstack_init (&gcse_obstack);
5159 /* Do a local const/copy propagation pass first. The global pass
5160 only handles global opportunities.
5161 If the local pass changes something, remove any unreachable blocks
5162 because the CPROP global dataflow analysis may get into infinite
5163 loops for CFGs with unreachable blocks.
5165 FIXME: This local pass should not be necessary after CSE (but for
5166 some reason it still is). It is also (proven) not necessary
5167 to run the local pass right after FWPWOP.
5169 FIXME: The global analysis would not get into infinite loops if it
5170 would use the DF solver (via df_simple_dataflow) instead of
5171 the solver implemented in this file. */
5172 if (local_cprop_pass ())
5174 delete_unreachable_blocks ();
5178 /* Determine implicit sets. */
5179 implicit_sets = XCNEWVEC (rtx, last_basic_block);
5180 find_implicit_sets ();
5182 alloc_hash_table (&set_hash_table, 1);
5183 compute_hash_table (&set_hash_table);
5185 /* Free implicit_sets before peak usage. */
5186 free (implicit_sets);
5187 implicit_sets = NULL;
5190 dump_hash_table (dump_file, "SET", &set_hash_table);
5191 if (set_hash_table.n_elems > 0)
5196 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
5197 compute_cprop_data ();
5199 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
5201 /* Reset tables used to keep track of what's still valid [since
5202 the start of the block]. */
5203 reset_opr_set_tables ();
5205 FOR_BB_INSNS (bb, insn)
5208 changed |= cprop_insn (insn);
5210 /* Keep track of everything modified by this insn. */
5211 /* ??? Need to be careful w.r.t. mods done to INSN.
5212 Don't call mark_oprs_set if we turned the
5213 insn into a NOTE. */
5214 if (! NOTE_P (insn))
5215 mark_oprs_set (insn);
5219 changed |= bypass_conditional_jumps ();
5223 free_hash_table (&set_hash_table);
5225 obstack_free (&gcse_obstack, NULL);
5229 fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
5230 current_function_name (), n_basic_blocks, bytes_used);
5231 fprintf (dump_file, "%d local const props, %d local copy props, ",
5232 local_const_prop_count, local_copy_prop_count);
5233 fprintf (dump_file, "%d global const props, %d global copy props\n\n",
5234 global_const_prop_count, global_copy_prop_count);
5241 /* All the passes implemented in this file. Each pass has its
5242 own gate and execute function, and at the end of the file a
5243 pass definition for passes.c.
5245 We do not construct an accurate cfg in functions which call
5246 setjmp, so none of these passes runs if the function calls
5248 FIXME: Should just handle setjmp via REG_SETJMP notes. */
5251 gate_rtl_cprop (void)
5253 return optimize > 0 && flag_gcse
5254 && !cfun->calls_setjmp
5259 execute_rtl_cprop (void)
5261 delete_unreachable_blocks ();
5262 df_set_flags (DF_LR_RUN_DCE);
5264 flag_rerun_cse_after_global_opts |= one_cprop_pass ();
5271 return optimize > 0 && flag_gcse
5272 && !cfun->calls_setjmp
5273 && optimize_function_for_speed_p (cfun)
5278 execute_rtl_pre (void)
5280 delete_unreachable_blocks ();
5282 flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
5287 gate_rtl_hoist (void)
5289 return optimize > 0 && flag_gcse
5290 && !cfun->calls_setjmp
5291 /* It does not make sense to run code hoisting unless we are optimizing
5292 for code size -- it rarely makes programs faster, and can make then
5293 bigger if we did PRE (when optimizing for space, we don't run PRE). */
5294 && optimize_function_for_size_p (cfun)
5299 execute_rtl_hoist (void)
5301 delete_unreachable_blocks ();
5303 flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
5307 struct rtl_opt_pass pass_rtl_cprop =
5312 gate_rtl_cprop, /* gate */
5313 execute_rtl_cprop, /* execute */
5316 0, /* static_pass_number */
5317 TV_CPROP, /* tv_id */
5318 PROP_cfglayout, /* properties_required */
5319 0, /* properties_provided */
5320 0, /* properties_destroyed */
5321 0, /* todo_flags_start */
5322 TODO_df_finish | TODO_verify_rtl_sharing |
5324 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5328 struct rtl_opt_pass pass_rtl_pre =
5332 "rtl pre", /* name */
5333 gate_rtl_pre, /* gate */
5334 execute_rtl_pre, /* execute */
5337 0, /* static_pass_number */
5339 PROP_cfglayout, /* properties_required */
5340 0, /* properties_provided */
5341 0, /* properties_destroyed */
5342 0, /* todo_flags_start */
5343 TODO_df_finish | TODO_verify_rtl_sharing |
5345 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5349 struct rtl_opt_pass pass_rtl_hoist =
5354 gate_rtl_hoist, /* gate */
5355 execute_rtl_hoist, /* execute */
5358 0, /* static_pass_number */
5359 TV_HOIST, /* tv_id */
5360 PROP_cfglayout, /* properties_required */
5361 0, /* properties_provided */
5362 0, /* properties_destroyed */
5363 0, /* todo_flags_start */
5364 TODO_df_finish | TODO_verify_rtl_sharing |
5366 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
5370 #include "gt-gcse.h"