/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   F. Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an incoming edge.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).
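
   For illustration only (invented pseudo registers, not output captured
   from this pass), the interplay looks like:

	r1 = r2;		r1 = r2;
	r3 = r1 + 4;    -->	r3 = r2 + 4;	[after the first cprop pass]
	r4 = r1 + 4;		r4 = r2 + 4;

   which exposes "r2 + 4" as a common subexpression for GCSE, and the
   second cprop pass then cleans up the copies GCSE introduces to carry
   its value.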

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).
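
   As a sketch (hypothetical user code, not part of this file):

	int
	sum_scaled (const int *p, int n, int a, int b)
	{
	  int s = 0, i;
	  for (i = 0; i < n; i++)
	    s += p[i] * (a + b);
	  return s;
	}

   Here "a + b" is loop invariant and partially redundant at the loop
   head: it is available along the back edge but not on loop entry, so
   PRE inserts the computation on the entry edge and both paths then
   reuse a single pseudo.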

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
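
   As a sketch with invented register numbers (not output of this pass),
   consider a diamond where "r5 + r6" is computed in one arm and again
   at the join:

	B2: r9 = r5 + r6	   B2: r9 = r5 + r6;  r11 = r9   [step 5]
	B3: (empty arm)		   B3: r11 = r5 + r6             [step 4]
	B4: r10 = r5 + r6	   B4: r10 = r11                 [step 3]

   After the transformation r11 carries the value on every path reaching
   B4, so the computation at the join is gone.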

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use { rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;
DEF_VEC_P (occr_t);
DEF_VEC_ALLOC_P (occr_t, heap);
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether the table is the expression hash table or the copy
     propagation (set) hash table.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table_d set_hash_table;
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself.  (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance.  (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.

   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */
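
/* For instance (hypothetical user code), in

	while (n--)
	  *p = *p + n;

   the load and the store both hit the single location *p and nothing
   else in the loop writes memory, so the MEM is tracked here: the load
   becomes a candidate for motion out of the loop and the store for
   sinking toward the exit block.  */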
struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};
/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs.  */
static sbitmap *ae_kill;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *, int);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static struct expr *lookup_set (unsigned int, struct hash_table_d *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *,
				      int, int *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);
#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)
/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;
  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */
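
/* As a worked example (invented block contents), given the single
   expression "r5 + r6" with bitmap index INDX and the block

	B7:  r1 = r5 + r6
	     r6 = r1 + 1
	     r2 = r5 + r6

   the first occurrence is anticipatable (its operands are unmodified
   before it) and the last one is available (its operands are unmodified
   after it), but the block is not transparent because r6 is set inside
   it.  So ANTLOC[B7] and COMP[B7] both get bit INDX set, while
   TRANSP[B7] has it cleared.  */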
static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
	sbitmap_vector_zero (transp, last_basic_block);
      else
	sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, table->set_p);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use rtx_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constant leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */
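
  /* As a sketch of the throttle below: with the default
     GCSE_COST_DISTANCE_RATIO of 10 and an expression costing one insn
     (COSTS_N_INSNS (1) == 4), max_distance becomes (10 * 4) / 10 == 4
     insns; cheaper expressions are allowed proportionally less motion,
     and a computed distance of 0 rejects the expression outright.  */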
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = rtx_cost (x, SET, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not clobber a hard register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */
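
/* For instance (a hypothetical caller, not code in this file), a motion
   pass could guard an insertion with

	if (can_assign_to_reg_without_clobbers_p (src))
	  emit_insn (gen_rtx_SET (VOIDmode, new_pseudo, src));

   and rely on the recognizer not demanding extra clobbers for it.  */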
bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static const_rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */
static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
		   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   travel.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      /* Not used for set_p tables.  */
      cur_expr->max_distance = 0;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr
      && BLOCK_FOR_INSN (cur_occr->insn) == BLOCK_FOR_INSN (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}
/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && CONST_INT_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1)))
    return true;

  /* Consider a COMPARE of the same registers constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}
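
/* For instance, (compare (reg 64) (reg 64)) in an integer mode is
   treated as constant here (the register number is invented), while the
   same pattern in a floating point mode is not, since with NaNs a
   register need not compare equal to itself.  */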
/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.  In
	 this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0), NULL)))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || (tmp = next_nonnote_insn (insn)) == NULL_RTX
		   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
		   || oprs_available_p (pat, tmp)))
	insert_set_in_table (pat, insn, table);
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, NULL)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, 0, table);
	}
    }
}
static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}
/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   TABLE determines whether this is for the assignment hash table or
   the expression hash table.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}
static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */
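
/* For example (invented LUIDs), if r7 is set at LUIDs 1 and 3 of the
   current block, then first_set == 1 and last_set == 3, so an expression
   using r7 at LUID 2 is neither anticipatable there (first_set < 2) nor
   available there (last_set >= 2).  */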
static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */
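
/* Concretely, each recorded store pushes its canonical address and then
   the destination itself, so from the head the list reads

	(dest . (dest_addr . rest))

   and consumers pop DEST together with its canonical address.  */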
static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}
/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */
static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (! INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
		  record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
	  && implicit_sets[current_bb->index] != NULL_RTX)
	hash_scan_set (implicit_sets[current_bb->index],
		       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}
/* Allocate space for the set/expr hash TABLE.
   The function insn count is used to determine the number of buckets.
   SET_P determines whether set or expression table will
   be created.  */

static void
alloc_hash_table (struct hash_table_d *table, int set_p)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
  table->set_p = set_p;
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the hash TABLE for doing copy/const propagation or
   expression hashing.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table_d *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}
/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
	free_EXPR_LIST_node (list);
      else
	free_INSN_LIST_node (list);
    }

  *listp = 0;
}
/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}
/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
/* Reset tables used to keep track of what's still available [since the
   start of the block].  */

static void
reset_opr_set_tables (void)
{
  /* Maintain a bitmap of which regs have been set since beginning of
     the block.  */
  CLEAR_REG_SET (reg_set_bitmap);

  /* Also keep a record of the last instruction to modify memory.
     For now this is very trivial, we only record whether any memory
     location has been modified.  */
  clear_modify_mem_tables ();
}
/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (const_rtx x, const_rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
				  DF_INSN_LUID (insn), x, 0))
	return 0;
      else
	return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    return oprs_not_set_p (XEXP (x, i), insn);

	  if (! oprs_not_set_p (XEXP (x, i), insn))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
	    return 0;
    }

  return 1;
}
/* Mark things set by a CALL.  */

static void
mark_call (rtx insn)
{
  if (! RTL_CONST_OR_PURE_CALL_P (insn))
    record_last_mem_set_info (insn);
}
1996 /* Mark things set by a SET. */
1999 mark_set (rtx pat, rtx insn)
2001 rtx dest = SET_DEST (pat);
2003 while (GET_CODE (dest) == SUBREG
2004 || GET_CODE (dest) == ZERO_EXTRACT
2005 || GET_CODE (dest) == STRICT_LOW_PART)
2006 dest = XEXP (dest, 0);
2009 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2010 else if (MEM_P (dest))
2011 record_last_mem_set_info (insn);
2013 if (GET_CODE (SET_SRC (pat)) == CALL)
2017 /* Record things set by a CLOBBER. */
2020 mark_clobber (rtx pat, rtx insn)
2022 rtx clob = XEXP (pat, 0);
2024 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2025 clob = XEXP (clob, 0);
2028 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2030 record_last_mem_set_info (insn);
2033 /* Record things set by INSN.
2034 This data is used by oprs_not_set_p. */
2037 mark_oprs_set (rtx insn)
2039 rtx pat = PATTERN (insn);
2042 if (GET_CODE (pat) == SET)
2043 mark_set (pat, insn);
2044 else if (GET_CODE (pat) == PARALLEL)
2045 for (i = 0; i < XVECLEN (pat, 0); i++)
2047 rtx x = XVECEXP (pat, 0, i);
2049 if (GET_CODE (x) == SET)
2051 else if (GET_CODE (x) == CLOBBER)
2052 mark_clobber (x, insn);
2053 else if (GET_CODE (x) == CALL)
2057 else if (GET_CODE (pat) == CLOBBER)
2058 mark_clobber (pat, insn);
2059 else if (GET_CODE (pat) == CALL)
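/* Illustrative sketch (added for exposition, not part of the original
   sources): the intended per-block protocol for the tables above.  Users
   such as cprop_insn reset the tables at the start of a block and then
   interleave oprs_not_set_p queries with mark_oprs_set updates while
   scanning forward, so each query sees only the sets of earlier insns.
   The function name below is a made-up example.  */

static void
scan_block_example (basic_block bb)
{
  rtx insn;

  reset_opr_set_tables ();
  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	/* Queries like oprs_not_set_p (x, insn) are valid here,
	   before INSN's own sets are recorded.  */
	mark_oprs_set (insn);
      }
}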
2064 /* Compute copy/constant propagation working variables. */
2066 /* Local properties of assignments. */
2067 static sbitmap *cprop_pavloc;
2068 static sbitmap *cprop_absaltered;
2070 /* Global properties of assignments (computed from the local properties). */
2071 static sbitmap *cprop_avin;
2072 static sbitmap *cprop_avout;
2074 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2075 basic blocks. N_SETS is the number of sets. */
2078 alloc_cprop_mem (int n_blocks, int n_sets)
2080 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2081 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2083 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2084 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2087 /* Free vars used by copy/const propagation. */
2090 free_cprop_mem (void)
2092 sbitmap_vector_free (cprop_pavloc);
2093 sbitmap_vector_free (cprop_absaltered);
2094 sbitmap_vector_free (cprop_avin);
2095 sbitmap_vector_free (cprop_avout);
2098 /* For each block, compute whether X is transparent. X is either an
2099 expression or an assignment [though we don't care which, for this context
2100 an assignment is treated as an expression]. For each block where an
2101 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX bit in BMAP. */
2105 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2111 /* repeat is used to turn tail-recursion into iteration since GCC
2112 can't do it when there's no return value. */
2118 code = GET_CODE (x);
2125 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2127 def = DF_REF_NEXT_REG (def))
2128 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2133 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2135 def = DF_REF_NEXT_REG (def))
2136 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2142 if (! MEM_READONLY_P (x))
2147 /* First handle all the blocks with calls. We don't need to
2148 do any list walking for them. */
2149 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2152 SET_BIT (bmap[bb_index], indx);
2154 RESET_BIT (bmap[bb_index], indx);
2157 /* Now iterate over the blocks which have memory modifications
2158 but which do not have any calls. */
2159 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2163 rtx list_entry = canon_modify_mem_list[bb_index];
2167 rtx dest, dest_addr;
2169 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2170 Examine each hunk of memory that is modified. */
2172 dest = XEXP (list_entry, 0);
2173 list_entry = XEXP (list_entry, 1);
2174 dest_addr = XEXP (list_entry, 0);
2176 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2177 x, NULL_RTX, rtx_addr_varies_p))
2180 SET_BIT (bmap[bb_index], indx);
2182 RESET_BIT (bmap[bb_index], indx);
2185 list_entry = XEXP (list_entry, 1);
2210 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2214 /* If we are about to do the last recursive call
2215 needed at this level, change it into iteration.
2216 This function is called enough to be worth it. */
2223 compute_transp (XEXP (x, i), indx, bmap, set_p);
2225 else if (fmt[i] == 'E')
2226 for (j = 0; j < XVECLEN (x, i); j++)
2227 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
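/* Worked example for compute_transp (added for exposition): if X is
   (plus (reg 5) (reg 6)), the REG case above visits the def chains of
   regs 5 and 6, so with SET_P == 0 the INDX bit is cleared in every block
   containing a definition of either register; INDX then remains set
   exactly in the blocks where X is transparent.  If X is a non-readonly
   MEM, it additionally loses transparency in every block with a call and
   in every block whose recorded stores may alias it according to
   canon_true_dependence.  */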
2231 /* Top level routine to do the dataflow analysis needed by copy/const propagation. */
2235 compute_cprop_data (void)
2237 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2238 compute_available (cprop_pavloc, cprop_absaltered,
2239 cprop_avout, cprop_avin);
2242 /* Copy/constant propagation. */
2244 /* Maximum number of register uses in an insn that we handle. */
2247 /* Table of uses found in an insn.
2248 Allocated statically to avoid alloc/free complexity and overhead. */
2249 static struct reg_use reg_use_table[MAX_USES];
2251 /* Index into `reg_use_table' while building it. */
2252 static int reg_use_count;
2254 /* Set up a list of register numbers used in INSN. The found uses are stored
2255 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2256 and contains the number of uses in the table upon exit.
2258 ??? If a register appears multiple times we will record it multiple times.
2259 This doesn't hurt anything but it will slow things down. */
2262 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2269 /* repeat is used to turn tail-recursion into iteration since GCC
2270 can't do it when there's no return value. */
2275 code = GET_CODE (x);
2278 if (reg_use_count == MAX_USES)
2281 reg_use_table[reg_use_count].reg_rtx = x;
2285 /* Recursively scan the operands of this expression. */
2287 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2291 /* If we are about to do the last recursive call
2292 needed at this level, change it into iteration.
2293 This function is called enough to be worth it. */
2300 find_used_regs (&XEXP (x, i), data);
2302 else if (fmt[i] == 'E')
2303 for (j = 0; j < XVECLEN (x, i); j++)
2304 find_used_regs (&XVECEXP (x, i, j), data);
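/* Illustrative usage (added for exposition, matching the callers later in
   this file): the table is rebuilt per insn, so a caller zeroes
   reg_use_count and then harvests the uses via note_uses, e.g.

     reg_use_count = 0;
     note_uses (&PATTERN (insn), find_used_regs, NULL);

   leaving reg_use_table[0 .. reg_use_count - 1] filled.  */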
2308 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2309 Returns nonzero if successful. */
2312 try_replace_reg (rtx from, rtx to, rtx insn)
2314 rtx note = find_reg_equal_equiv_note (insn);
2317 rtx set = single_set (insn);
2319 /* Usually we substitute easy stuff, so we won't copy everything.
2320 We however need to take care to not duplicate non-trivial CONST expressions. */
2324 validate_replace_src_group (from, to, insn);
2325 if (num_changes_pending () && apply_change_group ())
2328 /* Try to simplify SET_SRC if we have substituted a constant. */
2329 if (success && set && CONSTANT_P (to))
2331 src = simplify_rtx (SET_SRC (set));
2334 validate_change (insn, &SET_SRC (set), src, 0);
2337 /* If there is already a REG_EQUAL note, update the expression in it
2338 with our replacement. */
2339 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2340 set_unique_reg_note (insn, REG_EQUAL,
2341 simplify_replace_rtx (XEXP (note, 0), from, to));
2342 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2344 /* If above failed and this is a single set, try to simplify the source of
2345 the set given our substitution. We could perhaps try this for multiple
2346 SETs, but it probably won't buy us anything. */
2347 src = simplify_replace_rtx (SET_SRC (set), from, to);
2349 if (!rtx_equal_p (src, SET_SRC (set))
2350 && validate_change (insn, &SET_SRC (set), src, 0))
2353 /* If we've failed to do replacement, have a single SET, don't already
2354 have a note, and have no special SET, add a REG_EQUAL note to not
2355 lose information. */
2356 if (!success && note == 0 && set != 0
2357 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2358 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2359 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2362 /* A REG_EQUAL note may get simplified into a register, which
2363 we don't allow; remove the note in that case. This ought
2364 not to happen, because the code above ought to synthesize a
2365 reg-reg move, but be on the safe side. */
2366 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2367 remove_note (insn, note);
2372 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
2373 NULL if no such set is found. */
2375 static struct expr *
2376 find_avail_set (int regno, rtx insn)
2378 /* SET1 contains the last set found that can be returned to the caller for
2379 use in a substitution. */
2380 struct expr *set1 = 0;
2382 /* Loops are not possible here. To get a loop we would need two sets
2383 available at the start of the block containing INSN. i.e. we would
2384 need two sets like this available at the start of the block:
2386 (set (reg X) (reg Y))
2387 (set (reg Y) (reg X))
2389 This cannot happen since the set of (reg Y) would have killed the
2390 set of (reg X) making it unavailable at the start of this block. */
2394 struct expr *set = lookup_set (regno, &set_hash_table);
2396 /* Find a set that is available at the start of the block
2397 which contains INSN. */
2400 if (TEST_BIT (cprop_avin[BLOCK_FOR_INSN (insn)->index],
2403 set = next_set (regno, set);
2406 /* If no available set was found we've reached the end of the
2407 (possibly empty) copy chain. */
2411 gcc_assert (GET_CODE (set->expr) == SET);
2413 src = SET_SRC (set->expr);
2415 /* We know the set is available.
2416 Now check that SRC is ANTLOC (i.e. none of the source operands
2417 have changed since the start of the block).
2419 If the source operand changed, we may still use it for the next
2420 iteration of this loop, but we may not use it for substitutions. */
2422 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2425 /* If the source of the set is anything except a register, then
2426 we have reached the end of the copy chain. */
2430 /* Follow the copy chain, i.e. start another iteration of the loop
2431 and see if we have an available copy into SRC. */
2432 regno = REGNO (src);
2435 /* SET1 holds the last set that was available and anticipatable at INSN. */
2440 /* Subroutine of cprop_insn that tries to propagate constants into
2441 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2442 it is the instruction that immediately precedes JUMP, and must be a
2443 single SET of a register. FROM is what we will try to replace,
2444 SRC is the constant we will try to substitute for it. Returns nonzero
2445 if a change was made. */
2448 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2450 rtx new_rtx, set_src, note_src;
2451 rtx set = pc_set (jump);
2452 rtx note = find_reg_equal_equiv_note (jump);
2456 note_src = XEXP (note, 0);
2457 if (GET_CODE (note_src) == EXPR_LIST)
2458 note_src = NULL_RTX;
2460 else note_src = NULL_RTX;
2462 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2463 set_src = note_src ? note_src : SET_SRC (set);
2465 /* First substitute the SETCC condition into the JUMP instruction,
2466 then substitute the known values into this expanded JUMP. */
2467 if (setcc != NULL_RTX
2468 && !modified_between_p (from, setcc, jump)
2469 && !modified_between_p (src, setcc, jump))
2472 rtx setcc_set = single_set (setcc);
2473 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2474 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2475 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2476 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2482 new_rtx = simplify_replace_rtx (set_src, from, src);
2484 /* If no simplification can be made, then try the next register. */
2485 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2488 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
2489 if (new_rtx == pc_rtx)
2493 /* Ensure the value computed inside the jump insn is equivalent
2494 to the one computed by setcc. */
2495 if (setcc && modified_in_p (new_rtx, setcc))
2497 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2499 /* When (some) constants are not valid in a comparison, and there
2500 are two registers to be replaced by constants before the entire
2501 comparison can be folded into a constant, we need to keep
2502 intermediate information in REG_EQUAL notes. For targets with
2503 separate compare insns, such notes are added by try_replace_reg.
2504 When we have a combined compare-and-branch instruction, however,
2505 we need to attach a note to the branch itself to make this
2506 optimization work. */
2508 if (!rtx_equal_p (new_rtx, note_src))
2509 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2513 /* Remove REG_EQUAL note after simplification. */
2515 remove_note (jump, note);
2519 /* Delete the cc0 setter. */
2520 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2521 delete_insn (setcc);
2524 global_const_prop_count++;
2525 if (dump_file != NULL)
2528 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2529 REGNO (from), INSN_UID (jump));
2530 print_rtl (dump_file, src);
2531 fprintf (dump_file, "\n");
2533 purge_dead_edges (bb);
2535 /* If a conditional jump has been changed into an unconditional jump, remove
2536 the jump and make the edge fallthru - this is always called in cfglayout mode. */
2538 if (new_rtx != pc_rtx && simplejump_p (jump))
2543 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2544 if (e->dest != EXIT_BLOCK_PTR
2545 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2547 e->flags |= EDGE_FALLTHRU;
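/* Worked example for cprop_jump (added for exposition): given SETCC

     (set (reg 100) (eq (reg 64) (const_int 0)))

   and JUMP

     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                             (label_ref 23) (pc)))

   substituting (const_int 0) for reg 64 lets the combined condition fold
   to a constant, so NEW_RTX becomes either (label_ref 23), turning the
   jump into a simplejump whose fallthru edge is removed above, or pc_rtx,
   in which case the jump is deleted as a no-op.  */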
2557 constprop_register (rtx insn, rtx from, rtx to)
2561 /* Check for reg or cc0 setting instructions followed by
2562 conditional branch instructions first. */
2563 if ((sset = single_set (insn)) != NULL
2565 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2567 rtx dest = SET_DEST (sset);
2568 if ((REG_P (dest) || CC0_P (dest))
2569 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2573 /* Handle normal insns next. */
2574 if (NONJUMP_INSN_P (insn)
2575 && try_replace_reg (from, to, insn))
2578 /* Try to propagate a CONST_INT into a conditional jump.
2579 We're pretty specific about what we will handle in this
2580 code; we can extend it as necessary over time.
2582 Right now the insn in question must look like
2583 (set (pc) (if_then_else ...)) */
2584 else if (any_condjump_p (insn) && onlyjump_p (insn))
2585 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2589 /* Perform constant and copy propagation on INSN.
2590 The result is nonzero if a change was made. */
2593 cprop_insn (rtx insn)
2595 struct reg_use *reg_used;
2603 note_uses (&PATTERN (insn), find_used_regs, NULL);
2605 note = find_reg_equal_equiv_note (insn);
2607 /* We may win even when propagating constants into notes. */
2609 find_used_regs (&XEXP (note, 0), NULL);
2611 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2612 reg_used++, reg_use_count--)
2614 unsigned int regno = REGNO (reg_used->reg_rtx);
2618 /* If the register has already been set in this block, there's
2619 nothing we can do. */
2620 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2623 /* Find an assignment that sets reg_used and is available
2624 at the start of the block. */
2625 set = find_avail_set (regno, insn);
2630 /* ??? We might be able to handle PARALLELs. Later. */
2631 gcc_assert (GET_CODE (pat) == SET);
2633 src = SET_SRC (pat);
2635 /* Constant propagation. */
2636 if (gcse_constant_p (src))
2638 if (constprop_register (insn, reg_used->reg_rtx, src))
2641 global_const_prop_count++;
2642 if (dump_file != NULL)
2644 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2645 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2646 print_rtl (dump_file, src);
2647 fprintf (dump_file, "\n");
2649 if (INSN_DELETED_P (insn))
2653 else if (REG_P (src)
2654 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2655 && REGNO (src) != regno)
2657 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2660 global_copy_prop_count++;
2661 if (dump_file != NULL)
2663 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2664 regno, INSN_UID (insn));
2665 fprintf (dump_file, " with reg %d\n", REGNO (src));
2668 /* The original insn setting reg_used may or may not now be
2669 deletable. We leave the deletion to flow. */
2670 /* FIXME: If it turns out that the insn isn't deletable,
2671 then we may have unnecessarily extended register lifetimes
2672 and made things worse. */
2677 if (changed && DEBUG_INSN_P (insn))
2683 /* Like find_used_regs, but avoid recording uses that appear in
2684 input-output contexts such as zero_extract or pre_dec. This
2685 restricts the cases we consider to those for which local cprop
2686 can legitimately make replacements. */
2689 local_cprop_find_used_regs (rtx *xptr, void *data)
2696 switch (GET_CODE (x))
2700 case STRICT_LOW_PART:
2709 /* Can only legitimately appear this early in the context of
2710 stack pushes for function arguments, but handle all of the
2711 codes nonetheless. */
2715 /* Setting a subreg of a register larger than word_mode leaves
2716 the non-written words unchanged. */
2717 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2725 find_used_regs (xptr, data);
2728 /* Try to perform local const/copy propagation on X in INSN. */
2731 do_local_cprop (rtx x, rtx insn)
2733 rtx newreg = NULL, newcnst = NULL;
2735 /* Rule out USE instructions and ASM statements as we don't want to
2736 change the hard registers mentioned. */
2738 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2739 || (GET_CODE (PATTERN (insn)) != USE
2740 && asm_noperands (PATTERN (insn)) < 0)))
2742 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
2743 struct elt_loc_list *l;
2747 for (l = val->locs; l; l = l->next)
2749 rtx this_rtx = l->loc;
2752 if (gcse_constant_p (this_rtx))
2754 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
2755 /* Don't copy propagate if the register has a REG_EQUIV note
2756 attached. At this point only function parameters should have
2757 REG_EQUIV notes, and if the argument slot is used somewhere
2758 explicitly, it means the address of the parameter has been
2759 taken, so we should not extend the lifetime of the pseudo. */
2760 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2761 || ! MEM_P (XEXP (note, 0))))
2764 if (newcnst && constprop_register (insn, x, newcnst))
2766 if (dump_file != NULL)
2768 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2770 fprintf (dump_file, "insn %d with constant ",
2772 print_rtl (dump_file, newcnst);
2773 fprintf (dump_file, "\n");
2775 local_const_prop_count++;
2778 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2780 if (dump_file != NULL)
2783 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2784 REGNO (x), INSN_UID (insn));
2785 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2787 local_copy_prop_count++;
2794 /* Do local const/copy propagation (i.e. within each basic block). */
2797 local_cprop_pass (void)
2801 struct reg_use *reg_used;
2802 bool changed = false;
2807 FOR_BB_INSNS (bb, insn)
2811 rtx note = find_reg_equal_equiv_note (insn);
2815 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2818 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2820 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2821 reg_used++, reg_use_count--)
2823 if (do_local_cprop (reg_used->reg_rtx, insn))
2829 if (INSN_DELETED_P (insn))
2832 while (reg_use_count);
2834 cselib_process_insn (insn);
2837 /* Forget everything at the end of a basic block. */
2838 cselib_clear_table ();
2846 /* Similar to get_condition, only the resulting condition must be
2847 valid at JUMP, instead of at EARLIEST.
2849 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2850 settle for the condition variable in the jump instruction being integral.
2851 We prefer to be able to record the value of a user variable, rather than
2852 the value of a temporary used in a condition. This could be solved by
2853 recording the value of *every* register scanned by canonicalize_condition,
2854 but this would require some code reorganization. */
2857 fis_get_condition (rtx jump)
2859 return get_condition (jump, NULL, false, true);
2862 /* Check the comparison COND to see if we can safely form an implicit set from
2863 it. COND is either an EQ or NE comparison. */
2866 implicit_set_cond_p (const_rtx cond)
2868 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2869 const_rtx cst = XEXP (cond, 1);
2871 /* We can't perform this optimization if either operand might be or might
2872 contain a signed zero. */
2873 if (HONOR_SIGNED_ZEROS (mode))
2875 /* It is sufficient to check if CST is or contains a zero. We must
2876 handle float, complex, and vector. If any subpart is a zero, then
2877 the optimization can't be performed. */
2878 /* ??? The complex and vector checks are not implemented yet. We just
2879 always return zero for them. */
2880 if (GET_CODE (cst) == CONST_DOUBLE)
2883 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2884 if (REAL_VALUES_EQUAL (d, dconst0))
2891 return gcse_constant_p (cst);
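/* Example of why the signed-zero check above matters (added for
   exposition): after "if (x == 0.0)" we must not record the implicit set
   "x = 0.0", because -0.0 == 0.0 under IEEE rules, so X could be -0.0 on
   the then-branch and substituting +0.0 would change expressions such as
   1.0 / x.  */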
2894 /* Find the implicit sets of a function. An "implicit set" is a constraint
2895 on the value of a variable, implied by a conditional jump. For example,
2896 following "if (x == 2)", the then branch may be optimized as though the
2897 conditional performed an "explicit set", in this example, "x = 2". This
2898 function records the set patterns that are implicit at the start of each basic block.
2901 FIXME: This would be more effective if critical edges were pre-split. As
2902 it is now, we can't record implicit sets for blocks that have
2903 critical successor edges. This results in missed optimizations
2904 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
2907 find_implicit_sets (void)
2909 basic_block bb, dest;
2915 /* Check for more than one successor. */
2916 if (EDGE_COUNT (bb->succs) > 1)
2918 cond = fis_get_condition (BB_END (bb));
2921 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2922 && REG_P (XEXP (cond, 0))
2923 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2924 && implicit_set_cond_p (cond))
2926 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2927 : FALLTHRU_EDGE (bb)->dest;
2930 /* Record nothing for a critical edge. */
2931 && single_pred_p (dest)
2932 && dest != EXIT_BLOCK_PTR)
2934 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2936 implicit_sets[dest->index] = new_rtx;
2939 fprintf (dump_file, "Implicit set of reg %d in ",
2940 REGNO (XEXP (cond, 0)));
2941 fprintf (dump_file, "basic block %d\n", dest->index);
2949 fprintf (dump_file, "Found %d implicit sets\n", count);
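/* Worked example for find_implicit_sets (added for exposition): for a
   block ending in

     (set (pc) (if_then_else (eq (reg 70) (const_int 2))
                             (label_ref L) (pc)))

   the EQ condition holds on the branch edge, so if the branch target has
   a single predecessor, implicit_sets[target->index] is set to

     (set (reg 70) (const_int 2))

   while nothing is recorded for the fallthru (NE) successor.  */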
2952 /* Bypass conditional jumps. */
2954 /* The value of last_basic_block at the beginning of the jump_bypass
2955 pass. The use of redirect_edge_and_branch_force may introduce new
2956 basic blocks, but the data flow analysis is only valid for basic
2957 block indices less than bypass_last_basic_block. */
2959 static int bypass_last_basic_block;
2961 /* Find a set of REGNO to a constant that is available at the end of basic
2962 block BB. Returns NULL if no such set is found. Based heavily upon find_avail_set. */
2965 static struct expr *
2966 find_bypass_set (int regno, int bb)
2968 struct expr *result = 0;
2973 struct expr *set = lookup_set (regno, &set_hash_table);
2977 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2979 set = next_set (regno, set);
2985 gcc_assert (GET_CODE (set->expr) == SET);
2987 src = SET_SRC (set->expr);
2988 if (gcse_constant_p (src))
2994 regno = REGNO (src);
3000 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3001 any of the instructions inserted on an edge. Jump bypassing places
3002 condition code setters on CFG edges using insert_insn_on_edge. This
3003 function is required to check that our data flow analysis is still
3004 valid prior to commit_edge_insertions. */
3007 reg_killed_on_edge (const_rtx reg, const_edge e)
3011 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3012 if (INSN_P (insn) && reg_set_p (reg, insn))
3018 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3019 basic block BB which has more than one predecessor. If not NULL, SETCC
3020 is the first instruction of BB, which is immediately followed by JUMP_INSN
3021 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3022 Returns nonzero if a change was made.
3024 During the jump bypassing pass, we may place copies of SETCC instructions
3025 on CFG edges. The following routine must be careful to pay attention to
3026 these inserted insns when performing its transformations. */
3029 bypass_block (basic_block bb, rtx setcc, rtx jump)
3034 int may_be_loop_header;
3038 insn = (setcc != NULL) ? setcc : jump;
3040 /* Determine set of register uses in INSN. */
3042 note_uses (&PATTERN (insn), find_used_regs, NULL);
3043 note = find_reg_equal_equiv_note (insn);
3045 find_used_regs (&XEXP (note, 0), NULL);
3047 may_be_loop_header = false;
3048 FOR_EACH_EDGE (e, ei, bb->preds)
3049 if (e->flags & EDGE_DFS_BACK)
3051 may_be_loop_header = true;
3056 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3060 if (e->flags & EDGE_COMPLEX)
3066 /* We can't redirect edges from new basic blocks. */
3067 if (e->src->index >= bypass_last_basic_block)
3073 /* The irreducible loops created by redirecting edges entering the
3074 loop from outside would decrease the effectiveness of some of the
3075 following optimizations, so prevent this. */
3076 if (may_be_loop_header
3077 && !(e->flags & EDGE_DFS_BACK))
3083 for (i = 0; i < reg_use_count; i++)
3085 struct reg_use *reg_used = &reg_use_table[i];
3086 unsigned int regno = REGNO (reg_used->reg_rtx);
3087 basic_block dest, old_dest;
3091 set = find_bypass_set (regno, e->src->index);
3096 /* Check the data flow is valid after edge insertions. */
3097 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3100 src = SET_SRC (pc_set (jump));
3103 src = simplify_replace_rtx (src,
3104 SET_DEST (PATTERN (setcc)),
3105 SET_SRC (PATTERN (setcc)));
3107 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3108 SET_SRC (set->expr));
3110 /* Jump bypassing may have already placed instructions on
3111 edges of the CFG. We can't bypass an outgoing edge that
3112 has instructions associated with it, as these insns won't
3113 get executed if the incoming edge is redirected. */
3115 if (new_rtx == pc_rtx)
3117 edest = FALLTHRU_EDGE (bb);
3118 dest = edest->insns.r ? NULL : edest->dest;
3120 else if (GET_CODE (new_rtx) == LABEL_REF)
3122 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3123 /* Don't bypass edges containing instructions. */
3124 edest = find_edge (bb, dest);
3125 if (edest && edest->insns.r)
3131 /* Avoid unification of the edge with other edges from original
3132 branch. We would end up emitting the instruction on "both" edges. */
3135 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3136 && find_edge (e->src, dest))
3142 && dest != EXIT_BLOCK_PTR)
3144 redirect_edge_and_branch_force (e, dest);
3146 /* Copy the register setter to the redirected edge.
3147 Don't copy CC0 setters, as CC0 is dead after jump. */
3150 rtx pat = PATTERN (setcc);
3151 if (!CC0_P (SET_DEST (pat)))
3152 insert_insn_on_edge (copy_insn (pat), e);
3155 if (dump_file != NULL)
3157 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3158 "in jump_insn %d equals constant ",
3159 regno, INSN_UID (jump));
3160 print_rtl (dump_file, SET_SRC (set->expr));
3161 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3162 e->src->index, old_dest->index, dest->index);
3175 /* Find basic blocks with more than one predecessor that only contain a
3176 single conditional jump. If the result of the comparison is known at
3177 compile-time from any incoming edge, redirect that edge to the
3178 appropriate target. Returns nonzero if a change was made.
3180 This function is now misnamed, because we also handle indirect jumps. */
3183 bypass_conditional_jumps (void)
3191 /* Note we start at block 1. */
3192 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3195 bypass_last_basic_block = last_basic_block;
3196 mark_dfs_back_edges ();
3199 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3200 EXIT_BLOCK_PTR, next_bb)
3202 /* Check for more than one predecessor. */
3203 if (!single_pred_p (bb))
3206 FOR_BB_INSNS (bb, insn)
3207 if (DEBUG_INSN_P (insn))
3209 else if (NONJUMP_INSN_P (insn))
3213 if (GET_CODE (PATTERN (insn)) != SET)
3216 dest = SET_DEST (PATTERN (insn));
3217 if (REG_P (dest) || CC0_P (dest))
3222 else if (JUMP_P (insn))
3224 if ((any_condjump_p (insn) || computed_jump_p (insn))
3225 && onlyjump_p (insn))
3226 changed |= bypass_block (bb, setcc, insn);
3229 else if (INSN_P (insn))
3234 /* If we bypassed any register setting insns, we inserted a
3235 copy on the redirected edge. These need to be committed. */
3237 commit_edge_insertions ();
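/* Worked example for jump bypassing (added for exposition): suppose block
   BB contains only

     (set (pc) (if_then_else (eq (reg 70) (const_int 0))
                             (label_ref L1) (pc)))

   and find_bypass_set proves (reg 70) equals (const_int 0) at the end of
   predecessor P.  The condition is then known to be true on the edge
   P->BB, so that edge is redirected straight to L1, skipping the
   conditional jump; a copy of any non-cc0 SETCC is placed on the
   redirected edge to keep the dataflow analysis valid.  */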
3242 /* Compute PRE+LCM working variables. */
3244 /* Local properties of expressions. */
3245 /* Nonzero for expressions that are transparent in the block. */
3246 static sbitmap *transp;
3248 /* Nonzero for expressions that are computed (available) in the block. */
3249 static sbitmap *comp;
3251 /* Nonzero for expressions that are locally anticipatable in the block. */
3252 static sbitmap *antloc;
3254 /* Nonzero for expressions where this block is an optimal computation point. */
3256 static sbitmap *pre_optimal;
3258 /* Nonzero for expressions which are redundant in a particular block. */
3259 static sbitmap *pre_redundant;
3261 /* Nonzero for expressions which should be inserted on a specific edge. */
3262 static sbitmap *pre_insert_map;
3264 /* Nonzero for expressions which should be deleted in a specific block. */
3265 static sbitmap *pre_delete_map;
3267 /* Contains the edge_list returned by pre_edge_lcm. */
3268 static struct edge_list *edge_list;
3270 /* Allocate vars used for PRE analysis. */
3273 alloc_pre_mem (int n_blocks, int n_exprs)
3275 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3276 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3277 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3280 pre_redundant = NULL;
3281 pre_insert_map = NULL;
3282 pre_delete_map = NULL;
3283 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3285 /* pre_insert and pre_delete are allocated later. */
3288 /* Free vars used for PRE analysis. */
3293 sbitmap_vector_free (transp);
3294 sbitmap_vector_free (comp);
3296 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3299 sbitmap_vector_free (pre_optimal);
3301 sbitmap_vector_free (pre_redundant);
3303 sbitmap_vector_free (pre_insert_map);
3305 sbitmap_vector_free (pre_delete_map);
3307 transp = comp = NULL;
3308 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3311 /* Remove certain expressions from the anticipatable and transparent
3312 sets of basic blocks that have an incoming abnormal edge.
3313 For PRE remove potentially trapping expressions to avoid placing
3314 them on abnormal edges. For hoisting remove memory references that
3315 can be clobbered by calls. */
3318 prune_expressions (bool pre_p)
3320 sbitmap prune_exprs;
3324 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
3325 sbitmap_zero (prune_exprs);
3326 for (ui = 0; ui < expr_hash_table.size; ui++)
3329 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3331 /* Note potentially trapping expressions. */
3332 if (may_trap_p (e->expr))
3334 SET_BIT (prune_exprs, e->bitmap_index);
3338 if (!pre_p && MEM_P (e->expr))
3339 /* Note memory references that can be clobbered by a call.
3340 We do not split abnormal edges in hoisting, so if a memory
3341 reference were hoisted along an abnormal edge, it would be
3342 placed /before/ the call. Therefore, only constant memory
3343 references can be hoisted along abnormal edges.
3346 if (GET_CODE (XEXP (e->expr, 0)) == SYMBOL_REF
3347 && CONSTANT_POOL_ADDRESS_P (XEXP (e->expr, 0)))
3350 if (MEM_READONLY_P (e->expr)
3351 && !MEM_VOLATILE_P (e->expr)
3352 && MEM_NOTRAP_P (e->expr))
3353 /* Constant memory reference, e.g., a PIC address. */
3356 /* ??? Optimally, we would use interprocedural alias
3357 analysis to determine if this mem is actually killed by this call. */
3360 SET_BIT (prune_exprs, e->bitmap_index);
3370 /* If the current block is the destination of an abnormal edge, we
3371 kill all trapping (for PRE) and memory (for hoist) expressions
3372 because we won't be able to properly place the instruction on
3373 the edge. So make them neither anticipatable nor transparent.
3374 This is fairly conservative.
3376 ??? For hoisting it may be necessary to check for set-and-jump
3377 instructions here, not just for abnormal edges. The general problem
3378 is that when an expression cannot be placed right at the end of
3379 a basic block, we should account for any side effects of a subsequent
3380 jump instruction that could clobber the expression. It would
3381 be best to implement this check along the lines of
3382 hoist_expr_reaches_here_p where the target block is already known
3383 and, hence, there's no need to conservatively prune expressions on
3384 "intermediate" set-and-jump instructions. */
3385 FOR_EACH_EDGE (e, ei, bb->preds)
3386 if ((e->flags & EDGE_ABNORMAL)
3387 && (pre_p || CALL_P (BB_END (e->src))))
3389 sbitmap_difference (antloc[bb->index],
3390 antloc[bb->index], prune_exprs);
3391 sbitmap_difference (transp[bb->index],
3392 transp[bb->index], prune_exprs);
3397 sbitmap_free (prune_exprs);
3400 /* Top level routine to do the dataflow analysis needed by PRE. */
3403 compute_pre_data (void)
3407 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3408 prune_expressions (true);
3409 sbitmap_vector_zero (ae_kill, last_basic_block);
3411 /* Compute ae_kill for each basic block using: ~(TRANSP | COMP). */
3418 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3419 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3422 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3423 ae_kill, &pre_insert_map, &pre_delete_map);
3424 sbitmap_vector_free (antloc);
3426 sbitmap_vector_free (ae_kill);
3432 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3435 VISITED is a pointer to a working buffer for tracking which BB's have
3436 been visited. It is NULL for the top-level call.
3438 We treat reaching expressions that go through blocks containing the same
3439 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3440 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3441 2 as not reaching. The intent is to improve the probability of finding
3442 only one reaching expression and to reduce register lifetimes by picking
3443 the closest such expression. */
3446 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3451 FOR_EACH_EDGE (pred, ei, bb->preds)
3453 basic_block pred_bb = pred->src;
3455 if (pred->src == ENTRY_BLOCK_PTR
3456 /* Has this predecessor already been visited? */
3457 || visited[pred_bb->index])
3458 ;/* Nothing to do. */
3460 /* Does this predecessor generate this expression? */
3461 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3463 /* Is this the occurrence we're looking for?
3464 Note that there's only one generating occurrence per block
3465 so we just need to check the block number. */
3466 if (occr_bb == pred_bb)
3469 visited[pred_bb->index] = 1;
3471 /* Ignore this predecessor if it kills the expression. */
3472 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3473 visited[pred_bb->index] = 1;
3475 /* Neither gen nor kill. */
3478 visited[pred_bb->index] = 1;
3479 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3484 /* All paths have been checked. */
3488 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
3489 memory allocated for that function is freed. */
3492 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3495 char *visited = XCNEWVEC (char, last_basic_block);
3497 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3504 /* Given an expr, generate RTL which we can insert at the end of a BB,
3505 or on an edge. Set the block number of any insns generated to the value of BB. */
3509 process_insert_insn (struct expr *expr)
3511 rtx reg = expr->reaching_reg;
3512 rtx exp = copy_rtx (expr->expr);
3517 /* If the expression is something that's an operand, like a constant,
3518 just copy it to a register. */
3519 if (general_operand (exp, GET_MODE (reg)))
3520 emit_move_insn (reg, exp);
3522 /* Otherwise, make a new insn to compute this expression and make sure the
3523 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3524 expression to make sure we don't have any sharing issues. */
3527 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3529 if (insn_invalid_p (insn))
3540 /* Add EXPR to the end of basic block BB.
3542 This is used by both PRE and code hoisting. */
3545 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
3547 rtx insn = BB_END (bb);
3549 rtx reg = expr->reaching_reg;
3550 int regno = REGNO (reg);
3553 pat = process_insert_insn (expr);
3554 gcc_assert (pat && INSN_P (pat));
3557 while (NEXT_INSN (pat_end) != NULL_RTX)
3558 pat_end = NEXT_INSN (pat_end);
3560 /* If the last insn is a jump, insert EXPR in front [taking care to
3561 handle cc0, etc. properly]. Similarly we need to take care of trapping
3562 instructions in the presence of non-call exceptions. */
3565 || (NONJUMP_INSN_P (insn)
3566 && (!single_succ_p (bb)
3567 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3573 /* If this is a jump table, then we can't insert stuff here. Since
3574 we know the previous real insn must be the tablejump, we insert
3575 the new instruction just before the tablejump. */
3576 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3577 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3578 insn = prev_real_insn (insn);
3581 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3582 if cc0 isn't set. */
3583 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3585 insn = XEXP (note, 0);
3588 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3589 if (maybe_cc0_setter
3590 && INSN_P (maybe_cc0_setter)
3591 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3592 insn = maybe_cc0_setter;
3595 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3596 new_insn = emit_insn_before_noloc (pat, insn, bb);
3599 /* Likewise if the last insn is a call, as will happen in the presence
3600 of exception handling. */
3601 else if (CALL_P (insn)
3602 && (!single_succ_p (bb)
3603 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3605 /* Keeping in mind targets with small register classes and parameters
3606 in registers, we search backward and place the instructions before
3607 the first parameter is loaded. Do this for everyone for consistency
3608 and a presumption that we'll get better code elsewhere as well. */
3610 /* Since different machines initialize their parameter registers
3611 in different orders, assume nothing. Collect the set of all
3612 parameter registers. */
3613 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3615 /* If we found all the parameter loads, then we want to insert
3616 before the first parameter load.
3618 If we did not find all the parameter loads, then we might have
3619 stopped on the head of the block, which could be a CODE_LABEL.
3620 If we inserted before the CODE_LABEL, then we would be putting
3621 the insn in the wrong basic block. In that case, put the insn
3622 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3623 while (LABEL_P (insn)
3624 || NOTE_INSN_BASIC_BLOCK_P (insn))
3625 insn = NEXT_INSN (insn);
3627 new_insn = emit_insn_before_noloc (pat, insn, bb);
3630 new_insn = emit_insn_after_noloc (pat, insn, bb);
3635 add_label_notes (PATTERN (pat), new_insn);
3638 pat = NEXT_INSN (pat);
3641 gcse_create_count++;
3645 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3646 bb->index, INSN_UID (new_insn));
3647 fprintf (dump_file, "copying expression %d to reg %d\n",
3648 expr->bitmap_index, regno);
3652 /* Insert partially redundant expressions on edges in the CFG to make
3653 the expressions fully redundant. */
3656 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3658 int e, i, j, num_edges, set_size, did_insert = 0;
3661 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3662 if it reaches any of the deleted expressions. */
3664 set_size = pre_insert_map[0]->size;
3665 num_edges = NUM_EDGES (edge_list);
3666 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3667 sbitmap_vector_zero (inserted, num_edges);
3669 for (e = 0; e < num_edges; e++)
3672 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3674 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3676 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3678 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3679 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3681 struct expr *expr = index_map[j];
3684 /* Now look at each deleted occurrence of this expression. */
3685 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3687 if (! occr->deleted_p)
3690 /* Insert this expression on this edge if it would
3691 reach the deleted occurrence in BB. */
3692 if (!TEST_BIT (inserted[e], j))
3695 edge eg = INDEX_EDGE (edge_list, e);
3697 /* We can't insert anything on an abnormal and
3698 critical edge, so we insert the insn at the end of
3699 the previous block. There are several alternatives
3700 detailed in Morgan's book, p. 277 (sec. 10.5), for
3701 handling this situation. This one is easiest for now. */
3704 if (eg->flags & EDGE_ABNORMAL)
3705 insert_insn_end_basic_block (index_map[j], bb);
3708 insn = process_insert_insn (index_map[j]);
3709 insert_insn_on_edge (insn, eg);
3714 fprintf (dump_file, "PRE: edge (%d,%d), ",
3716 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3717 fprintf (dump_file, "copy expression %d\n",
3718 expr->bitmap_index);
3721 update_ld_motion_stores (expr);
3722 SET_BIT (inserted[e], j);
3724 gcse_create_count++;
3731 sbitmap_vector_free (inserted);
3735 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3736 Given "old_reg <- expr" (INSN), instead of adding after it
3737 reaching_reg <- old_reg
3738 it's better to do the following:
3739 reaching_reg <- expr
3740 old_reg <- reaching_reg
3741 because this way copy propagation can discover additional PRE
3742 opportunities. But if this fails, we try the old way.
3743 When "expr" is a store, i.e.
3744 given "MEM <- old_reg", instead of adding after it
3745 reaching_reg <- old_reg
3746 it's better to add it before as follows:
3747 reaching_reg <- old_reg
3748 MEM <- reaching_reg. */
3751 pre_insert_copy_insn (struct expr *expr, rtx insn)
3753 rtx reg = expr->reaching_reg;
3754 int regno = REGNO (reg);
3755 int indx = expr->bitmap_index;
3756 rtx pat = PATTERN (insn);
3757 rtx set, first_set, new_insn;
3761 /* This block matches the logic in hash_scan_insn. */
3762 switch (GET_CODE (pat))
3769 /* Search through the parallel looking for the set whose
3770 source was the expression that we're interested in. */
3771 first_set = NULL_RTX;
3773 for (i = 0; i < XVECLEN (pat, 0); i++)
3775 rtx x = XVECEXP (pat, 0, i);
3776 if (GET_CODE (x) == SET)
3778 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3779 may not find an equivalent expression, but in this
3780 case the PARALLEL will have a single set. */
3781 if (first_set == NULL_RTX)
3783 if (expr_equiv_p (SET_SRC (x), expr->expr))
3791 gcc_assert (first_set);
3792 if (set == NULL_RTX)
3800 if (REG_P (SET_DEST (set)))
3802 old_reg = SET_DEST (set);
3803 /* Check if we can modify the set destination in the original insn. */
3804 if (validate_change (insn, &SET_DEST (set), reg, 0))
3806 new_insn = gen_move_insn (old_reg, reg);
3807 new_insn = emit_insn_after (new_insn, insn);
3811 new_insn = gen_move_insn (reg, old_reg);
3812 new_insn = emit_insn_after (new_insn, insn);
3815 else /* This is possible only in case of a store to memory. */
3817 old_reg = SET_SRC (set);
3818 new_insn = gen_move_insn (reg, old_reg);
3820 /* Check if we can modify the set source in the original insn. */
3821 if (validate_change (insn, &SET_SRC (set), reg, 0))
3822 new_insn = emit_insn_before (new_insn, insn);
3824 new_insn = emit_insn_after (new_insn, insn);
3827 gcse_create_count++;
3831 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3832 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
3833 INSN_UID (insn), regno);
3836 /* Copy available expressions that reach the redundant expression
3837 to `reaching_reg'. */
3840 pre_insert_copies (void)
3842 unsigned int i, added_copy;
3847 /* For each available expression in the table, copy the result to
3848 `reaching_reg' if the expression reaches a deleted one.
3850 ??? The current algorithm is rather brute force.
3851 Need to do some profiling. */
3853 for (i = 0; i < expr_hash_table.size; i++)
3854 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3856 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3857 we don't want to insert a copy here because the expression may not
3858 really be redundant. So only insert an insn if the expression was
3859 deleted. This test also avoids further processing if the
3860 expression wasn't deleted anywhere. */
3861 if (expr->reaching_reg == NULL)
3864 /* Set when we add a copy for that expression. */
3867 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3869 if (! occr->deleted_p)
3872 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3874 rtx insn = avail->insn;
3876 /* No need to handle this one if handled already. */
3877 if (avail->copied_p)
3880 /* Don't handle this one if it's a redundant one. */
3881 if (INSN_DELETED_P (insn))
3884 /* Or if the expression doesn't reach the deleted one. */
3885 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3887 BLOCK_FOR_INSN (occr->insn)))
3892 /* Copy the result of avail to reaching_reg. */
3893 pre_insert_copy_insn (expr, insn);
3894 avail->copied_p = 1;
3899 update_ld_motion_stores (expr);
3903 /* Emit a move from SRC to DEST noting the equivalence with the expression computed in INSN. */
3906 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3909 rtx set = single_set (insn), set2;
3913 /* This should never fail since we're creating a reg->reg copy
3914 we've verified to be valid. */
3916 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3918 /* Note the equivalence for local CSE pass. */
3919 set2 = single_set (new_rtx);
3920 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3922 if ((note = find_reg_equal_equiv_note (insn)))
3923 eqv = XEXP (note, 0);
3925 eqv = SET_SRC (set);
3927 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
3932 /* Delete redundant computations.
3933 Deletion is done by changing the insn to copy the `reaching_reg' of
3934 the expression into the result of the SET. It is left to later passes
3935 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
3937 Returns nonzero if a change is made. */
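/* Worked example for the deletion step (added for exposition): a
   redundant occurrence

     (set (reg 65) (plus (reg 60) (reg 61)))

   is rewritten by gcse_emit_move_after into the copy

     (set (reg 65) (reg 130))

   where reg 130 is EXPR->reaching_reg, the pseudo that holds the value of
   the expression at every remaining computation and insertion point.  */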
3948 for (i = 0; i < expr_hash_table.size; i++)
3949 for (expr = expr_hash_table.table[i];
3951 expr = expr->next_same_hash)
3953 int indx = expr->bitmap_index;
3955 /* We only need to search antic_occr since we require ANTLOC != 0. */
3958 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3960 rtx insn = occr->insn;
3962 basic_block bb = BLOCK_FOR_INSN (insn);
3964 /* We only delete insns that have a single_set. */
3965 if (TEST_BIT (pre_delete_map[bb->index], indx)
3966 && (set = single_set (insn)) != 0
3967 && dbg_cnt (pre_insn))
3969 /* Create a pseudo-reg to store the result of reaching
3970 expressions into. Get the mode for the new pseudo from
3971 the mode of the original destination pseudo. */
3972 if (expr->reaching_reg == NULL)
3973 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
3975 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
3977 occr->deleted_p = 1;
3984 "PRE: redundant insn %d (expression %d) in ",
3985 INSN_UID (insn), indx);
3986 fprintf (dump_file, "bb %d, reaching reg is %d\n",
3987 bb->index, REGNO (expr->reaching_reg));
3996 /* Perform GCSE optimizations using PRE.
3997 This is called by one_pre_gcse_pass after all the dataflow analysis
4000 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4001 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4002 Compiler Design and Implementation.
4004 ??? A new pseudo reg is created to hold the reaching expression. The nice
4005 thing about the classical approach is that it would try to use an existing
4006 reg. If the register can't be adequately optimized [i.e. we introduce
4007 reload problems], one could add a pass here to propagate the new register through the block.
4010 ??? We don't handle single sets in PARALLELs because we're [currently] not
4011 able to copy the rest of the parallel when we insert copies to create full
4012 redundancies from partial redundancies. However, there's no reason why we
4013 can't handle PARALLELs in the cases where there are no partial redundancies. */
4020 int did_insert, changed;
4021 struct expr **index_map;
4024 /* Compute a mapping from expression number (`bitmap_index') to
4025 hash table entry. */
4027 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4028 for (i = 0; i < expr_hash_table.size; i++)
4029 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4030 index_map[expr->bitmap_index] = expr;
4032 /* Delete the redundant insns first so that
4033 - we know what register to use for the new insns and for the other
4034 ones with reaching expressions
4035 - we know which insns are redundant when we go to create copies */
4037 changed = pre_delete ();
4038 did_insert = pre_edge_insert (edge_list, index_map);
4040 /* In other places with reaching expressions, copy the expression to the
4041 specially allocated pseudo-reg that reaches the redundant expr. */
4042 pre_insert_copies ();
4045 commit_edge_insertions ();
4053 /* Top level routine to perform one PRE GCSE pass.
4055 Return nonzero if a change was made. */
4058 one_pre_gcse_pass (void)
4062 gcse_subst_count = 0;
4063 gcse_create_count = 0;
4065 /* Return if there's nothing to do, or it is too expensive. */
4066 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4067 || is_too_expensive (_("PRE disabled")))
4070 /* We need alias. */
4071 init_alias_analysis ();
4074 gcc_obstack_init (&gcse_obstack);
4077 alloc_hash_table (&expr_hash_table, 0);
4078 add_noreturn_fake_exit_edges ();
4080 compute_ld_motion_mems ();
4082 compute_hash_table (&expr_hash_table);
4083 trim_ld_motion_mems ();
4085 dump_hash_table (dump_file, "Expression", &expr_hash_table);
4087 if (expr_hash_table.n_elems > 0)
4089 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4090 compute_pre_data ();
4091 changed |= pre_gcse ();
4092 free_edge_list (edge_list);
4097 remove_fake_exit_edges ();
4098 free_hash_table (&expr_hash_table);
4101 obstack_free (&gcse_obstack, NULL);
4103 /* We are finished with alias. */
4104 end_alias_analysis ();
4108 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4109 current_function_name (), n_basic_blocks, bytes_used);
4110 fprintf (dump_file, "%d substs, %d insns created\n",
4111 gcse_subst_count, gcse_create_count);
4117 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4118 to INSN. If such notes are added to an insn which references a
4119 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4120 that note, because the following loop optimization pass requires
4123 /* ??? If there was a jump optimization pass after gcse and before loop,
4124 then we would not need to do this here, because jump would add the
4125 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4128 add_label_notes (rtx x, rtx insn)
4130 enum rtx_code code = GET_CODE (x);
4134 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4136 /* This code used to ignore labels that referred to dispatch tables to
4137 avoid flow generating (slightly) worse code.
4139 We no longer ignore such label references (see LABEL_REF handling in
4140 mark_jump_label for additional information). */
4142 /* There's no reason for current users to emit jump-insns with
4143 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET notes. */
4145 gcc_assert (!JUMP_P (insn));
4146 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4148 if (LABEL_P (XEXP (x, 0)))
4149 LABEL_NUSES (XEXP (x, 0))++;
4154 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4157 add_label_notes (XEXP (x, i), insn);
4158 else if (fmt[i] == 'E')
4159 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4160 add_label_notes (XVECEXP (x, i, j), insn);
4164 /* Code Hoisting variables and subroutines. */
4166 /* Very busy expressions. */
4167 static sbitmap *hoist_vbein;
4168 static sbitmap *hoist_vbeout;
4170 /* ??? We could compute post dominators and run this algorithm in
4171 reverse to perform tail merging; doing so would probably be
4172 more effective than the tail merging code in jump.c.
4174 It's unclear if tail merging could be run in parallel with
4175 code hoisting. It would be nice. */
4177 /* Allocate vars used for code hoisting analysis. */
4180 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4182 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4183 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4184 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4186 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4187 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4190 /* Free vars used for code hoisting analysis. */
4193 free_code_hoist_mem (void)
4195 sbitmap_vector_free (antloc);
4196 sbitmap_vector_free (transp);
4197 sbitmap_vector_free (comp);
4199 sbitmap_vector_free (hoist_vbein);
4200 sbitmap_vector_free (hoist_vbeout);
4202 free_dominance_info (CDI_DOMINATORS);
4205 /* Compute the very busy expressions at entry/exit from each block.
4207 An expression is very busy if all paths from a given point
4208 compute the expression. */
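/* In dataflow terms, the fixed point computed below is (a sketch of what
   the loop does, added for exposition):

     VBEOUT(bb) = (intersection of VBEIN(s) over successors s) | COMP(bb)
     VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))

   with both sets initially empty.  */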
4211 compute_code_hoist_vbeinout (void)
4213 int changed, passes;
4216 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4217 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4226 /* We scan the blocks in reverse order to speed up the convergence. */
4228 FOR_EACH_BB_REVERSE (bb)
4230 if (bb->next_bb != EXIT_BLOCK_PTR)
4232 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
4233 hoist_vbein, bb->index);
4235 /* Include expressions in VBEout that are calculated
4236 in BB and available at its end. */
4237 sbitmap_a_or_b (hoist_vbeout[bb->index],
4238 hoist_vbeout[bb->index], comp[bb->index]);
4241 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
4243 hoist_vbeout[bb->index],
4252 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4256 fprintf (dump_file, "vbein (%d): ", bb->index);
4257 dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
4258 fprintf (dump_file, "vbeout(%d): ", bb->index);
4259 dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
4264 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4267 compute_code_hoist_data (void)
4269 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4270 prune_expressions (false);
4271 compute_code_hoist_vbeinout ();
4272 calculate_dominance_info (CDI_DOMINATORS);
4274 fprintf (dump_file, "\n");
4277 /* Determine if the expression identified by EXPR_INDEX would
4278 reach BB unimpaired if it were placed at the end of EXPR_BB.
4279 Stop the search if the expression would need to be moved more
4280 than DISTANCE instructions.
4282 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4283 to me that the expression must either be computed or transparent in
4284 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4285 would allow the expression to be hoisted out of loops, even if
4286 the expression wasn't a loop invariant.
4288 Contrast this to reachability for PRE where an expression is
4289 considered reachable if *any* path reaches instead of *all* paths. */
4293 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb,
4294 char *visited, int distance, int *bb_size)
4298 int visited_allocated_locally = 0;
4300 /* Terminate the search if the distance, for which EXPR is allowed to move, is exhausted. */
4304 distance -= bb_size[bb->index];
4310 gcc_assert (distance == 0);
4312 if (visited == NULL)
4314 visited_allocated_locally = 1;
4315 visited = XCNEWVEC (char, last_basic_block);
4318 FOR_EACH_EDGE (pred, ei, bb->preds)
4320 basic_block pred_bb = pred->src;
4322 if (pred->src == ENTRY_BLOCK_PTR)
4324 else if (pred_bb == expr_bb)
4326 else if (visited[pred_bb->index])
4329 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4335 visited[pred_bb->index] = 1;
4336 if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb,
4337 visited, distance, bb_size))
4341 if (visited_allocated_locally)
4344 return (pred == NULL);
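/* For instance (an illustrative scenario, not from the original
   comments): when asking whether 'a + b', placed at the end of a block
   EXPR_BB outside a loop, reaches an occurrence in a block BB inside
   the loop, any intervening block that stores to 'a' is not transparent
   for 'a + b'.  The backward walk above then hits the !TEST_BIT (transp...)
   case, breaks out, and returns 0, correctly refusing to hoist a
   non-invariant expression out of the loop.  */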
/* Find occurrence in BB.  */

static struct occr *
find_occr_in_bb (struct occr *occr, basic_block bb)
{
  /* Find the right occurrence of this expression.  */
  while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
    occr = occr->next;

  return occr;
}
/* Actually perform code hoisting.  */

static int
hoist_code (void)
{
  basic_block bb, dominated;
  VEC (basic_block, heap) *dom_tree_walk;
  unsigned int dom_tree_walk_index;
  VEC (basic_block, heap) *domby;
  unsigned int i, j;
  struct expr **index_map;
  struct expr *expr;
  int *to_bb_head;
  int *bb_size;
  int changed = 0;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;
  /* Calculate sizes of basic blocks and note how far
     each instruction is from the start of its block.  We then use this
     data to restrict distance an expression can travel.  */

  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block);

  FOR_EACH_BB (bb)
    {
      rtx insn;
      rtx bb_end;
      int to_head;

      insn = BB_HEAD (bb);
      bb_end = BB_END (bb);
      to_head = 0;

      while (insn != bb_end)
	{
	  /* Don't count debug instructions to avoid them affecting
	     decision choices.  */
	  if (NONDEBUG_INSN_P (insn))
	    to_bb_head[INSN_UID (insn)] = to_head++;
	  insn = NEXT_INSN (insn);
	}

      bb_size[bb->index] = to_head;
    }
  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
	      && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
		  == ENTRY_BLOCK_PTR->next_bb));

  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
					    ENTRY_BLOCK_PTR->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions; nothing gets hoisted from the entry block.  */
  for (dom_tree_walk_index = 0;
       VEC_iterate (basic_block, dom_tree_walk, dom_tree_walk_index, bb);
       dom_tree_walk_index++)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
      if (VEC_length (basic_block, domby) == 0)
	continue;

      /* Examine each expression that is very busy at the exit of this
	 block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
	{
	  if (TEST_BIT (hoist_vbeout[bb->index], i))
	    {
	      /* Current expression.  */
	      struct expr *expr = index_map[i];
	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
	      int hoistable = 0;
	      /* Basic blocks that have occurrences reachable from BB.  */
	      bitmap_head _from_bbs, *from_bbs = &_from_bbs;
	      /* Occurrences reachable from BB.  */
	      VEC (occr_t, heap) *occrs_to_hoist = NULL;
	      /* We want to insert the expression into BB only once, so
		 note when we've inserted it.  */
	      int insn_inserted_p;
	      occr_t occr;

	      bitmap_initialize (from_bbs, 0);

	      /* If an expression is computed in BB and is available at end of
		 BB, hoist all occurrences dominated by BB to BB.  */
	      if (TEST_BIT (comp[bb->index], i))
		{
		  occr = find_occr_in_bb (expr->antic_occr, bb);

		  if (occr)
		    {
		      /* An occurrence might've been already deleted
			 while processing a dominator of BB.  */
		      if (occr->deleted_p)
			gcc_assert (MAX_HOIST_DEPTH > 1);
		      else
			{
			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
			  hoistable++;
			}
		    }
		  else
		    hoistable++;
		}
	      /* We've found a potentially hoistable expression, now
		 we look at every block BB dominates to see if it
		 computes the expression.  */
	      for (j = 0; VEC_iterate (basic_block, domby, j, dominated); j++)
		{
		  int max_distance;

		  /* Ignore self dominance.  */
		  if (bb == dominated)
		    continue;
		  /* We've found a dominated block, now see if it computes
		     the busy expression and whether or not moving that
		     expression to the "beginning" of that block is safe.  */
		  if (!TEST_BIT (antloc[dominated->index], i))
		    continue;

		  occr = find_occr_in_bb (expr->antic_occr, dominated);
		  gcc_assert (occr);

		  /* An occurrence might've been already deleted
		     while processing a dominator of BB.  */
		  if (occr->deleted_p)
		    {
		      gcc_assert (MAX_HOIST_DEPTH > 1);
		      continue;
		    }
		  gcc_assert (NONDEBUG_INSN_P (occr->insn));

		  max_distance = expr->max_distance;
		  if (max_distance > 0)
		    /* Adjust MAX_DISTANCE to account for the fact that
		       OCCR won't have to travel all of DOMINATED, but
		       only part of it.  */
		    max_distance += (bb_size[dominated->index]
				     - to_bb_head[INSN_UID (occr->insn)]);

		  /* Note if the expression would reach the dominated block
		     unimpaired if it was placed at the end of BB.

		     Keep track of how many times this expression is hoistable
		     from a dominated block into BB.  */
		  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL,
						 max_distance, bb_size))
		    {
		      hoistable++;
		      VEC_safe_push (occr_t, heap,
				     occrs_to_hoist, occr);
		      bitmap_set_bit (from_bbs, dominated->index);
		    }
		}
	      /* If we found more than one hoistable occurrence of this
		 expression, then note it in the vector of expressions to
		 hoist.  It makes no sense to hoist things which are computed
		 in only one BB, and doing so tends to pessimize register
		 allocation.  One could increase this value to try harder
		 to avoid any possible code expansion due to register
		 allocation issues; however experiments have shown that
		 the vast majority of hoistable expressions are only movable
		 from two successors, so raising this threshold is likely
		 to nullify any benefit we get from code hoisting.  */
	      if (hoistable > 1 && dbg_cnt (hoist_insn))
		{
		  /* If (hoistable != VEC_length), then there is
		     an occurrence of EXPR in BB itself.  Don't waste
		     time looking for LCA in this case.  */
		  if ((unsigned) hoistable
		      == VEC_length (occr_t, occrs_to_hoist))
		    {
		      basic_block lca;

		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
							      from_bbs);
		      if (lca != bb)
			/* Punt, it's better to hoist these occurrences to
			   LCA.  */
			VEC_free (occr_t, heap, occrs_to_hoist);
		    }
		}
	      else
		/* Punt, no point hoisting a single occurrence.  */
		VEC_free (occr_t, heap, occrs_to_hoist);

	      insn_inserted_p = 0;

	      /* Walk through occurrences of the I'th expression we want
		 to hoist to BB and make the transformations.  */
	      for (j = 0;
		   VEC_iterate (occr_t, occrs_to_hoist, j, occr);
		   j++)
		{
		  rtx insn;
		  rtx set;

		  gcc_assert (!occr->deleted_p);

		  insn = occr->insn;
		  set = single_set (insn);
		  gcc_assert (set);

		  /* Create a pseudo-reg to store the result of reaching
		     expressions into.  Get the mode for the new pseudo
		     from the mode of the original destination pseudo.

		     It is important to use new pseudos whenever we
		     emit a set.  This will allow reload to use
		     rematerialization for such registers.  */
		  if (!insn_inserted_p)
		    expr->reaching_reg
		      = gen_reg_rtx_and_attrs (SET_DEST (set));

		  gcse_emit_move_after (expr->reaching_reg, SET_DEST (set),
					insn);
		  delete_insn (insn);
		  occr->deleted_p = 1;
		  changed = 1;
		  gcse_subst_count++;

		  if (!insn_inserted_p)
		    {
		      insert_insn_end_basic_block (expr, bb);
		      insn_inserted_p = 1;
		    }
		}

	      VEC_free (occr_t, heap, occrs_to_hoist);
	      bitmap_clear (from_bbs);
	    }
	}
      VEC_free (basic_block, heap, domby);
    }

  VEC_free (basic_block, heap, dom_tree_walk);
  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}
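/* An illustrative example (ours, not from the original comments) of
   the transformation hoist_code performs:

       if (c)                       reg = a + b;
         x = a + b;      ==>        if (c)
       else                           x = reg;
         y = a + b;                 else
                                      y = reg;

   'a + b' is very busy at the end of the block ending in the
   conditional jump and is computed in both dominated blocks, so a
   single computation into a fresh pseudo is inserted at the end of
   the dominating block and both occurrences are replaced by it.  */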
/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table, 0);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}
/* Here we provide the things required to do store motion towards
   the exit.  In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo(float scale)
	    {
	      for (i=0; i<10; i++)
		a[i] *= scale;
	    }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge-based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
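/* A sketch of the intended end result for the example above (ours,
   not literal compiler output): the loop counter lives entirely in a
   register and only its final value is stored after the loop,

	    for (reg = 0; reg < 10; reg++)
	      a[reg] *= scale;
	    i = reg;

   so no memory traffic for 'i' remains inside the loop.  */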
static hashval_t
pre_ldst_expr_hash (const void *p)
{
  int do_not_record_p = 0;
  const struct ls_expr *const x = (const struct ls_expr *) p;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int
pre_ldst_expr_eq (const void *p1, const void *p2)
{
  const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
    *const ptr2 = (const struct ls_expr *) p2;
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}
/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  void **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
  if (*slot)
    return (struct ls_expr *)*slot;

  ptr = XNEW (struct ls_expr);

  ptr->next = pre_ldst_mems;
  ptr->expr = NULL;
  ptr->pattern = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads = NULL_RTX;
  ptr->stores = NULL_RTX;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid = 0;
  ptr->index = 0;
  ptr->hash_index = hash;
  pre_ldst_mems = ptr;
  *slot = ptr;

  return ptr;
}
/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);
  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ldst_mems (void)
{
  if (pre_ldst_table)
    htab_delete (pre_ldst_table);
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;
      pre_ldst_mems = pre_ldst_mems->next;
      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}
/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);
      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	 Loads : ");
      if (ptr->loads)
	print_rtl (file, ptr->loads);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n	Stores : ");
      if (ptr->stores)
	print_rtl (file, ptr->stores);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}
/* Return the entry for X if X is in the list of ldst only expressions,
   or NULL if there is no valid entry for it.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  void **slot;

  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
  if (!slot || ((struct ls_expr *)*slot)->invalid)
    return NULL;
  return (struct ls_expr *) *slot;
}

/* Return first item in the list.  */

static inline struct ls_expr *
first_ls_expr (void)
{
  return pre_ldst_mems;
}

/* Return the next item in the list after the specified one.  */

static inline struct ls_expr *
next_ls_expr (struct ls_expr * ptr)
{
  return ptr->next;
}
/* Load Motion for loads which only kill themselves.  */

/* Return true if X is a simple MEM operation, with no registers or
   side effects.  These are the types of loads we consider for the
   ld_motion list, otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (! MEM_P (x))
    return 0;
  if (MEM_VOLATILE_P (x))
    return 0;
  if (GET_MODE (x) == BLKmode)
    return 0;
  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;
  if (side_effects_p (x))
    return 0;
  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;
  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
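/* For instance (illustrative only): a load from a global such as
   (mem:SI (symbol_ref "x")) passes the checks above and becomes a
   load-motion candidate, whereas a volatile MEM or a BLKmode MEM
   (e.g. a structure copy) is rejected here and left to the normal
   aliasing machinery.  */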
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias themselves.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
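/* For example (an illustrative pattern, not taken from a dump), in

       (set (reg:SI 100)
	    (plus:SI (mem:SI (symbol_ref "x")) (const_int 1)))

   the MEM is buried inside a PLUS rather than being the whole SET_SRC,
   so compute_ld_motion_mems cannot track it as a load; the recursion
   above finds it and marks its ldst entry invalid.  */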
/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
				pre_ldst_expr_eq, NULL);

  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));

		  /* Check for a simple LOAD...  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (REG_P (dest))
			ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
		      else
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

		  /* Check for stores.  Don't worry about aliased ones, they
		     will block any movement we might do later.  We only care
		     about this exact pattern since those are the only
		     circumstance that we will ignore the aliasing info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);

		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src))
			ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		invalidate_any_buried_refs (PATTERN (insn));
	    }
	}
    }
}
/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct expr *) 0;

      if (expr)
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
      else
	{
	  *last = ptr->next;
	  htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = * last;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */

static void
update_ld_motion_stores (struct expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
	{
	  rtx insn = XEXP (list, 0);
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;
	  rtx copy;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
	      print_rtl (dump_file, expr->reaching_reg);
	      fprintf (dump_file, ":\n	");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* Un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}
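/* An illustrative RTL sketch of the rewrite above (ours, not literal
   dump output).  A tracked store

       (set (mem:SI (symbol_ref "x")) (reg:SI 65))

   whose expression has reaching register (reg:SI 100) becomes

       (set (reg:SI 100) (reg:SI 65))
       (set (mem:SI (symbol_ref "x")) (reg:SI 100))

   so any load of "x" that was replaced by (reg:SI 100) still observes
   the stored value.  */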
/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple switch statements.  Rather than simply
     threshold the number of blocks, use something with a more
     graceful degradation.  */
  if (n_edges > 20000 + n_basic_blocks * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks, n_edges / n_basic_blocks);

      return true;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers",
	       pass, n_basic_blocks, max_reg_num ());

      return true;
    }

  return false;
}
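/* As a worked example of the first threshold above (numbers invented
   for illustration): a function with 1000 basic blocks is rejected
   only once it has more than 20000 + 1000 * 4 = 24000 edges, i.e. an
   average of 24 edges per block, far beyond the roughly 2 edges per
   block of a typical cfg.  */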
/* Main function for the CPROP pass.  */

static int
one_cprop_pass (void)
{
  int changed = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("const/copy propagation disabled")))
    return 0;

  global_const_prop_count = local_const_prop_count = 0;
  global_copy_prop_count = local_copy_prop_count = 0;

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  /* Do a local const/copy propagation pass first.  The global pass
     only handles global opportunities.
     If the local pass changes something, remove any unreachable blocks
     because the CPROP global dataflow analysis may get into infinite
     loops for CFGs with unreachable blocks.

     FIXME: This local pass should not be necessary after CSE (but for
	    some reason it still is).  It is also (proven) not necessary
	    to run the local pass right after FWPROP.

     FIXME: The global analysis would not get into infinite loops if it
	    would use the DF solver (via df_simple_dataflow) instead of
	    the solver implemented in this file.  */
  if (local_cprop_pass ())
    {
      delete_unreachable_blocks ();
      df_analyze ();
    }

  /* Determine implicit sets.  */
  implicit_sets = XCNEWVEC (rtx, last_basic_block);
  find_implicit_sets ();

  alloc_hash_table (&set_hash_table, 1);
  compute_hash_table (&set_hash_table);

  /* Free implicit_sets before peak usage.  */
  free (implicit_sets);
  implicit_sets = NULL;

  if (dump_file)
    dump_hash_table (dump_file, "SET", &set_hash_table);
  if (set_hash_table.n_elems > 0)
    {
      basic_block bb;
      rtx insn;

      alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
      compute_cprop_data ();

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR,
		      next_bb)
	{
	  /* Reset tables used to keep track of what's still valid [since
	     the start of the block].  */
	  reset_opr_set_tables ();

	  FOR_BB_INSNS (bb, insn)
	    if (INSN_P (insn))
	      {
		changed |= cprop_insn (insn);

		/* Keep track of everything modified by this insn.  */
		/* ??? Need to be careful w.r.t. mods done to INSN.
		       Don't call mark_oprs_set if we turned the
		       insn into a NOTE.  */
		if (! NOTE_P (insn))
		  mark_oprs_set (insn);
	      }
	}

      changed |= bypass_conditional_jumps ();

      free_cprop_mem ();
    }

  free_hash_table (&set_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  if (dump_file)
    {
      fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d local const props, %d local copy props, ",
	       local_const_prop_count, local_copy_prop_count);
      fprintf (dump_file, "%d global const props, %d global copy props\n\n",
	       global_const_prop_count, global_copy_prop_count);
    }

  return changed;
}
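/* An illustrative example (ours) of the transformations this pass
   performs:

       x = 7;                 x = 7;
       y = x;         ==>     y = 7;
       if (y > z) ...         if (7 > z) ...

   Constant and copy propagation substitutes known constants and
   copies into uses, both within a block (local_cprop_pass) and across
   blocks via the SET hash table and the cprop dataflow; when a jump
   condition becomes computable on some incoming edge,
   bypass_conditional_jumps redirects that edge around the jump.  */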
/* All the passes implemented in this file.  Each pass has its
   own gate and execute function, and at the end of the file a
   pass definition for passes.c.

   We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

static bool
gate_rtl_cprop (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && dbg_cnt (cprop);
}

static unsigned int
execute_rtl_cprop (void)
{
  delete_unreachable_blocks ();
  df_set_flags (DF_LR_RUN_DCE);
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_cprop_pass ();
  return 0;
}

static bool
gate_rtl_pre (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && optimize_function_for_speed_p (cfun)
    && dbg_cnt (pre);
}

static unsigned int
execute_rtl_pre (void)
{
  delete_unreachable_blocks ();
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_pre_gcse_pass ();
  return 0;
}

static bool
gate_rtl_hoist (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

static unsigned int
execute_rtl_hoist (void)
{
  delete_unreachable_blocks ();
  df_analyze ();
  flag_rerun_cse_after_global_opts |= one_code_hoisting_pass ();
  return 0;
}
struct rtl_opt_pass pass_rtl_cprop =
{
 {
  RTL_PASS,
  "cprop",                              /* name */
  gate_rtl_cprop,                       /* gate */
  execute_rtl_cprop,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CPROP,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_pre =
{
 {
  RTL_PASS,
  "rtl pre",                            /* name */
  gate_rtl_pre,                         /* gate */
  execute_rtl_pre,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_PRE,                               /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_rtl_hoist =
{
 {
  RTL_PASS,
  "hoist",                              /* name */
  gate_rtl_hoist,                       /* gate */
  execute_rtl_hoist,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_HOIST,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};

#include "gt-gcse.h"