/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   communications of the acm, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   We perform the following steps:

   1) Compute table of places where registers are set.

   2) Perform copy/constant propagation.

   3) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   4) Perform another pass of copy/constant propagation.  Try to bypass
      conditional jumps if the condition can be computed from a value of
      an incoming edge.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).
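
   For illustration only (this example is not part of the original
   comment): given

	if (cond)
	  x = a + b;
	y = a + b;

   a + b is available on only one incoming path to the second computation,
   so it is partially redundant.  PRE inserts "t = a + b" on the path where
   the expression was not available and rewrites the second computation as
   "y = t".  A loop-invariant expression computed inside a loop is partially
   redundant in the same sense, so the insertion lands in front of the loop.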
   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes is even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************
   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
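
/* For illustration only (this example is not part of the original
   comment): after PRE copies an expression result into a new pseudo,

	r5 = r2 + r3	becomes		t = r2 + r3
					r5 = t

   and the deleted redundant computation is replaced by another copy from
   t.  The second CPROP pass then propagates t into the uses of r5 so the
   intermediate copies can die.  */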
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};
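
/* For illustration only (this example is not part of the original
   comment): in a block

	r4 = r2 + r3	<- anticipatable occurrence of (plus r2 r3)
	r2 = ...
	r5 = r2 + r3	<- available occurrence of (plus r2 r3)

   the first insn is the anticipatable occurrence because its operands are
   unmodified from the start of the block, and the last insn is the
   available occurrence because its operands are unmodified through the
   end of the block.  */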
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;
DEF_VEC_P (occr_t);
DEF_VEC_ALLOC_P (occr_t, heap);
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in buckets.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Whether this is the expression table or the copy propagation one.  */
  int set_p;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table_d set_hash_table;
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */
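
/* For illustration only (this example is not part of the original
   comment): load motion tracks a location such as

	(set (reg 100) (mem (symbol_ref "x")))		load
	(set (mem (symbol_ref "x")) (reg 200))		store, same location

   Nothing but these loads and stores may touch "x"; every store copies the
   same value into the reaching register used for the loads, so the setter
   can be re-issued when the refs are moved.  */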
struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};
/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs.  */
static sbitmap *ae_kill;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static bool gcse_constant_p (const_rtx);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static void insert_set_in_table (rtx, rtx, struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *, int);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static struct expr *lookup_set (unsigned int, struct hash_table_d *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (const_rtx, const_rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (const_rtx, int, sbitmap *, int);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static int cprop_insn (rtx);
static void find_implicit_sets (void);
static int one_cprop_pass (void);
static bool constprop_register (rtx, rtx, rtx);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (const_rtx, const_edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *,
				      int, int *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx);
static int local_cprop_pass (void);
static bool is_too_expensive (const char *);
#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)
/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}
/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (rtx, last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.
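
   For illustration only (this example is not part of the original
   comment): for a block containing

	r4 = r2 + r3
	r2 = ...

   the expression (plus r2 r3) is locally anticipatable (ANTLOC) because
   it is computed before r2 is modified; it is not transparent (TRANSP)
   because r2 is modified in the block; and it is not locally available
   (COMP) because recomputing it at the end of the block would yield a
   different value.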

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the set hash table,
   additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */
static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
	sbitmap_vector_zero (transp, last_basic_block);
      else
	sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, table->set_p);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
  int max_distance = 0;

#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use rtx_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */
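
  /* For illustration only (this example is not part of the original
     comment): the rematerialization-friendly shape is a pseudo set once
     from a constant,

	(set (reg 100) (const_int 12345))

     possibly with a REG_EQUAL note, so that IRA/reload can recreate the
     constant at its uses instead of spilling (reg 100) to the stack when
     register pressure is high.  */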
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = rtx_cost (x, SET, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction;
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static const_rtx gcse_mem_operand;
/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
		   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}
/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   travel.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table_d *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      /* Not used for set_p tables.  */
      cur_expr->max_distance = 0;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr
      && BLOCK_FOR_INSN (cur_occr->insn) == BLOCK_FOR_INSN (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = GOBNEW (struct occr);
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}
/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (const_rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && CONST_INT_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1)))
    return true;

  /* Consider a COMPARE of the same registers constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  /* Since X might be inserted more than once we have to take care that it
     is sharable.  */
  return CONSTANT_P (x) && (GET_CODE (x) != CONST || shared_const_p (x));
}
/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0), NULL)))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || (tmp = next_nonnote_nondebug_insn (insn)) == NULL_RTX
		   || BLOCK_FOR_INSN (tmp) != BLOCK_FOR_INSN (insn)
		   || oprs_available_p (pat, tmp)))
	insert_set_in_table (pat, insn, table);
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If SET_P is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.  */
static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}
/* Dump the hash table TABLE to file FILE.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */
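
/* For illustration only (this example is not part of the original
   comment): after a store through (mem (reg 100)), the per-block list
   gains two nodes, the MEM itself first and its canonicalized address
   second:

	(expr_list (mem (reg 100))
	   (expr_list <canonical address> ...previous entries...))

   so consumers of canon_modify_mem_list must always pop two elements
   at a time.  */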
static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx unused1 ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}
/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
		  record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
	  && implicit_sets[current_bb->index] != NULL_RTX)
	hash_scan_set (implicit_sets[current_bb->index],
		       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}
/* Allocate space for the set/expr hash TABLE.
   The function's instruction count is used to determine the number of
   buckets to use.  SET_P determines whether the set or expression table
   will be created.  */

static void
alloc_hash_table (struct hash_table_d *table, int set_p)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
  table->set_p = set_p;
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the hash TABLE for doing copy/const propagation or
   expression hash table.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table_d *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}
/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
	free_EXPR_LIST_node (list);
      else
	free_INSN_LIST_node (list);
    }

  *listp = NULL;
}
/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}
/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
/* Reset tables used to keep track of what's still available [since the
   start of the block].  */

static void
reset_opr_set_tables (void)
{
  /* Maintain a bitmap of which regs have been set since beginning of
     the block.  */
  CLEAR_REG_SET (reg_set_bitmap);

  /* Also keep a record of the last instruction to modify memory.
     For now this is very trivial, we only record whether any memory
     location has been modified.  */
  clear_modify_mem_tables ();
}
/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (const_rtx x, const_rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
				  DF_INSN_LUID (insn), x, 0))
	return 0;
      else
	return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    return oprs_not_set_p (XEXP (x, i), insn);

	  if (! oprs_not_set_p (XEXP (x, i), insn))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
	    return 0;
    }

  return 1;
}
1990 /* Mark things set by a CALL. */
1993 mark_call (rtx insn)
1995 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1996 record_last_mem_set_info (insn);
1999 /* Mark things set by a SET. */
2002 mark_set (rtx pat, rtx insn)
2004 rtx dest = SET_DEST (pat);
2006 while (GET_CODE (dest) == SUBREG
2007 || GET_CODE (dest) == ZERO_EXTRACT
2008 || GET_CODE (dest) == STRICT_LOW_PART)
2009 dest = XEXP (dest, 0);
2012 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2013 else if (MEM_P (dest))
2014 record_last_mem_set_info (insn);
2016 if (GET_CODE (SET_SRC (pat)) == CALL)
2020 /* Record things set by a CLOBBER. */
2023 mark_clobber (rtx pat, rtx insn)
2025 rtx clob = XEXP (pat, 0);
2027 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2028 clob = XEXP (clob, 0);
2031 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2033 record_last_mem_set_info (insn);
2036 /* Record things set by INSN.
2037 This data is used by oprs_not_set_p. */
2040 mark_oprs_set (rtx insn)
2042 rtx pat = PATTERN (insn);
2045 if (GET_CODE (pat) == SET)
2046 mark_set (pat, insn);
2047 else if (GET_CODE (pat) == PARALLEL)
2048 for (i = 0; i < XVECLEN (pat, 0); i++)
2050 rtx x = XVECEXP (pat, 0, i);
2052 if (GET_CODE (x) == SET)
2054 else if (GET_CODE (x) == CLOBBER)
2055 mark_clobber (x, insn);
2056 else if (GET_CODE (x) == CALL)
2060 else if (GET_CODE (pat) == CLOBBER)
2061 mark_clobber (pat, insn);
2062 else if (GET_CODE (pat) == CALL)
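/* A minimal driver sketch (hypothetical; the real driver lives in the
   cprop pass proper): within one basic block, oprs_not_set_p answers
   queries only for insns that have not yet been fed to mark_oprs_set.  */
static void
scan_block_sketch (basic_block bb)
{
  rtx insn;

  reset_opr_set_tables ();
  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	/* Queries like oprs_not_set_p (x, insn) go here, before the
	   insn's own sets are recorded.  */
	mark_oprs_set (insn);
      }
}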
2067 /* Compute copy/constant propagation working variables. */
2069 /* Local properties of assignments. */
2070 static sbitmap *cprop_pavloc;
2071 static sbitmap *cprop_absaltered;
2073 /* Global properties of assignments (computed from the local properties). */
2074 static sbitmap *cprop_avin;
2075 static sbitmap *cprop_avout;
2077 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2078 basic blocks. N_SETS is the number of sets. */
2081 alloc_cprop_mem (int n_blocks, int n_sets)
2083 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2084 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2086 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2087 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2090 /* Free vars used by copy/const propagation. */
2093 free_cprop_mem (void)
2095 sbitmap_vector_free (cprop_pavloc);
2096 sbitmap_vector_free (cprop_absaltered);
2097 sbitmap_vector_free (cprop_avin);
2098 sbitmap_vector_free (cprop_avout);
2101 /* For each block, compute whether X is transparent. X is either an
2102 expression or an assignment [though we don't care which, for this context
2103 an assignment is treated as an expression]. For each block where an
2104 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2105 bit in BMAP. */
2108 compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
2114 /* repeat is used to turn tail-recursion into iteration since GCC
2115 can't do it when there's no return value. */
2121 code = GET_CODE (x);
2128 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2130 def = DF_REF_NEXT_REG (def))
2131 SET_BIT (bmap[DF_REF_BB (def)->index], indx);
2136 for (def = DF_REG_DEF_CHAIN (REGNO (x));
2138 def = DF_REF_NEXT_REG (def))
2139 RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
2145 if (! MEM_READONLY_P (x))
2150 /* First handle all the blocks with calls. We don't need to
2151 do any list walking for them. */
2152 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2155 SET_BIT (bmap[bb_index], indx);
2157 RESET_BIT (bmap[bb_index], indx);
2160 /* Now iterate over the blocks which have memory modifications
2161 but which do not have any calls. */
2162 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2166 rtx list_entry = canon_modify_mem_list[bb_index];
2170 rtx dest, dest_addr;
2172 /* LIST_ENTRY must be an INSN of some kind that sets memory.
2173 Examine each hunk of memory that is modified. */
2175 dest = XEXP (list_entry, 0);
2176 list_entry = XEXP (list_entry, 1);
2177 dest_addr = XEXP (list_entry, 0);
2179 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2180 x, NULL_RTX, rtx_addr_varies_p))
2183 SET_BIT (bmap[bb_index], indx);
2185 RESET_BIT (bmap[bb_index], indx);
2188 list_entry = XEXP (list_entry, 1);
2213 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2217 /* If we are about to do the last recursive call
2218 needed at this level, change it into iteration.
2219 This function is called enough to be worth it. */
2226 compute_transp (XEXP (x, i), indx, bmap, set_p);
2228 else if (fmt[i] == 'E')
2229 for (j = 0; j < XVECLEN (x, i); j++)
2230 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
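/* A minimal sketch (hypothetical caller, mirroring what
   compute_local_properties does with SET_P == 0): start from
   "transparent everywhere" and let compute_transp clear the bit in
   every block that modifies one of the expression's operands.  */
static void
mark_transparency_sketch (struct expr *expr, sbitmap *transp_map)
{
  sbitmap_vector_ones (transp_map, last_basic_block);
  compute_transp (expr->expr, expr->bitmap_index, transp_map, 0);
}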
2234 /* Top level routine to do the dataflow analysis needed by copy/const
2235 propagation. */
2238 compute_cprop_data (void)
2240 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2241 compute_available (cprop_pavloc, cprop_absaltered,
2242 cprop_avout, cprop_avin);
2245 /* Copy/constant propagation. */
2247 /* Maximum number of register uses in an insn that we handle. */
2250 /* Table of uses found in an insn.
2251 Allocated statically to avoid alloc/free complexity and overhead. */
2252 static struct reg_use reg_use_table[MAX_USES];
2254 /* Index into `reg_use_table' while building it. */
2255 static int reg_use_count;
2257 /* Set up a list of register numbers used in INSN. The found uses are stored
2258 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2259 and contains the number of uses in the table upon exit.
2261 ??? If a register appears multiple times we will record it multiple times.
2262 This doesn't hurt anything but it will slow things down. */
2265 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2272 /* repeat is used to turn tail-recursion into iteration since GCC
2273 can't do it when there's no return value. */
2278 code = GET_CODE (x);
2281 if (reg_use_count == MAX_USES)
2284 reg_use_table[reg_use_count].reg_rtx = x;
2288 /* Recursively scan the operands of this expression. */
2290 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2294 /* If we are about to do the last recursive call
2295 needed at this level, change it into iteration.
2296 This function is called enough to be worth it. */
2303 find_used_regs (&XEXP (x, i), data);
2305 else if (fmt[i] == 'E')
2306 for (j = 0; j < XVECLEN (x, i); j++)
2307 find_used_regs (&XVECEXP (x, i, j), data);
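/* A minimal usage sketch (hypothetical helper): collecting the register
   uses of one insn, exactly as cprop_insn does below.  */
static void
collect_uses_sketch (rtx insn)
{
  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);
  /* reg_use_table[0 .. reg_use_count - 1] now holds the uses.  */
}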
2311 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2312 Returns nonzero if successful. */
2315 try_replace_reg (rtx from, rtx to, rtx insn)
2317 rtx note = find_reg_equal_equiv_note (insn);
2320 rtx set = single_set (insn);
2322 /* Usually we substitute easy stuff, so we won't copy everything.
2323 We do, however, need to take care not to duplicate non-trivial CONST
2324 expressions. */
2327 validate_replace_src_group (from, to, insn);
2328 if (num_changes_pending () && apply_change_group ())
2331 /* Try to simplify SET_SRC if we have substituted a constant. */
2332 if (success && set && CONSTANT_P (to))
2334 src = simplify_rtx (SET_SRC (set));
2337 validate_change (insn, &SET_SRC (set), src, 0);
2340 /* If there is already a REG_EQUAL note, update the expression in it
2341 with our replacement. */
2342 if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
2343 set_unique_reg_note (insn, REG_EQUAL,
2344 simplify_replace_rtx (XEXP (note, 0), from, to));
2345 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2347 /* If above failed and this is a single set, try to simplify the source of
2348 the set given our substitution. We could perhaps try this for multiple
2349 SETs, but it probably won't buy us anything. */
2350 src = simplify_replace_rtx (SET_SRC (set), from, to);
2352 if (!rtx_equal_p (src, SET_SRC (set))
2353 && validate_change (insn, &SET_SRC (set), src, 0))
2356 /* If we've failed to perform the replacement, have a single SET to
2357 a REG destination and don't yet have a note, add a REG_EQUAL note
2358 to not lose information. */
2359 if (!success && note == 0 && set != 0 && REG_P (SET_DEST (set)))
2360 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2363 /* The REG_EQUAL note may get simplified into a plain register, which
2364 we don't allow; remove the note in that case. This ought not to
2365 happen, because the code above ought to have synthesized a reg-reg
2366 move already, but be on the safe side. */
2367 if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
2368 remove_note (insn, note);
2373 /* Find a set of REGNO that is available on entry to INSN's block. Return
2374 NULL if no such set is found. */
2376 static struct expr *
2377 find_avail_set (int regno, rtx insn)
2379 /* SET1 contains the last set found that can be returned to the caller for
2380 use in a substitution. */
2381 struct expr *set1 = 0;
2383 /* Loops are not possible here. To get a loop we would need two sets
2384 available at the start of the block containing INSN. i.e. we would
2385 need two sets like this available at the start of the block:
2387 (set (reg X) (reg Y))
2388 (set (reg Y) (reg X))
2390 This cannot happen since the set of (reg Y) would have killed the
2391 set of (reg X) making it unavailable at the start of this block. */
2395 struct expr *set = lookup_set (regno, &set_hash_table);
2397 /* Find a set that is available at the start of the block
2398 which contains INSN. */
2401 if (TEST_BIT (cprop_avin[BLOCK_FOR_INSN (insn)->index],
2404 set = next_set (regno, set);
2407 /* If no available set was found we've reached the end of the
2408 (possibly empty) copy chain. */
2412 gcc_assert (GET_CODE (set->expr) == SET);
2414 src = SET_SRC (set->expr);
2416 /* We know the set is available.
2417 Now check that SRC is ANTLOC (i.e. none of the source operands
2418 have changed since the start of the block).
2420 If the source operand changed, we may still use it for the next
2421 iteration of this loop, but we may not use it for substitutions. */
2423 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2426 /* If the source of the set is anything except a register, then
2427 we have reached the end of the copy chain. */
2431 /* Follow the copy chain, i.e. start another iteration of the loop
2432 and see if we have an available copy into SRC. */
2433 regno = REGNO (src);
2436 /* SET1 holds the last set that was available and anticipatable at
2437 INSN. */
2441 /* Subroutine of cprop_insn that tries to propagate constants into
2442 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2443 it is the instruction that immediately precedes JUMP, and must be a
2444 single SET of a register. FROM is what we will try to replace,
2445 SRC is the constant we will try to substitute for it. Returns nonzero
2446 if a change was made. */
2449 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2451 rtx new_rtx, set_src, note_src;
2452 rtx set = pc_set (jump);
2453 rtx note = find_reg_equal_equiv_note (jump);
2457 note_src = XEXP (note, 0);
2458 if (GET_CODE (note_src) == EXPR_LIST)
2459 note_src = NULL_RTX;
2461 else note_src = NULL_RTX;
2463 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2464 set_src = note_src ? note_src : SET_SRC (set);
2466 /* First substitute the SETCC condition into the JUMP instruction,
2467 then substitute the given values into this expanded JUMP. */
2468 if (setcc != NULL_RTX
2469 && !modified_between_p (from, setcc, jump)
2470 && !modified_between_p (src, setcc, jump))
2473 rtx setcc_set = single_set (setcc);
2474 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2475 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2476 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2477 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2483 new_rtx = simplify_replace_rtx (set_src, from, src);
2485 /* If no simplification can be made, then try the next register. */
2486 if (rtx_equal_p (new_rtx, SET_SRC (set)))
2489 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
2490 if (new_rtx == pc_rtx)
2494 /* Ensure the value computed inside the jump insn is equivalent
2495 to the one computed by SETCC. */
2496 if (setcc && modified_in_p (new_rtx, setcc))
2498 if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
2500 /* When (some) constants are not valid in a comparison, and there
2501 are two registers to be replaced by constants before the entire
2502 comparison can be folded into a constant, we need to keep
2503 intermediate information in REG_EQUAL notes. For targets with
2504 separate compare insns, such notes are added by try_replace_reg.
2505 When we have a combined compare-and-branch instruction, however,
2506 we need to attach a note to the branch itself to make this
2507 optimization work. */
2509 if (!rtx_equal_p (new_rtx, note_src))
2510 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
2514 /* Remove REG_EQUAL note after simplification. */
2516 remove_note (jump, note);
2520 /* Delete the cc0 setter. */
2521 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2522 delete_insn (setcc);
2525 global_const_prop_count++;
2526 if (dump_file != NULL)
2529 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2530 REGNO (from), INSN_UID (jump));
2531 print_rtl (dump_file, src);
2532 fprintf (dump_file, "\n");
2534 purge_dead_edges (bb);
2536 /* If a conditional jump has been changed into unconditional jump, remove
2537 the jump and make the edge fallthru - this is always called in
2538 cfglayout mode. */
2539 if (new_rtx != pc_rtx && simplejump_p (jump))
2544 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ei_next (&ei))
2545 if (e->dest != EXIT_BLOCK_PTR
2546 && BB_HEAD (e->dest) == JUMP_LABEL (jump))
2548 e->flags |= EDGE_FALLTHRU;
2558 constprop_register (rtx insn, rtx from, rtx to)
2562 /* Check for reg or cc0 setting instructions followed by
2563 conditional branch instructions first. */
2564 if ((sset = single_set (insn)) != NULL
2566 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2568 rtx dest = SET_DEST (sset);
2569 if ((REG_P (dest) || CC0_P (dest))
2570 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2574 /* Handle normal insns next. */
2575 if (NONJUMP_INSN_P (insn)
2576 && try_replace_reg (from, to, insn))
2579 /* Try to propagate a CONST_INT into a conditional jump.
2580 We're pretty specific about what we will handle in this
2581 code; we can extend this as necessary over time.
2583 Right now the insn in question must look like
2584 (set (pc) (if_then_else ...)) */
2585 else if (any_condjump_p (insn) && onlyjump_p (insn))
2586 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2590 /* Perform constant and copy propagation on INSN.
2591 The result is nonzero if a change was made. */
2594 cprop_insn (rtx insn)
2596 struct reg_use *reg_used;
2604 note_uses (&PATTERN (insn), find_used_regs, NULL);
2606 note = find_reg_equal_equiv_note (insn);
2608 /* We may win even when propagating constants into notes. */
2610 find_used_regs (&XEXP (note, 0), NULL);
2612 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2613 reg_used++, reg_use_count--)
2615 unsigned int regno = REGNO (reg_used->reg_rtx);
2619 /* If the register has already been set in this block, there's
2620 nothing we can do. */
2621 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2624 /* Find an assignment that sets reg_used and is available
2625 at the start of the block. */
2626 set = find_avail_set (regno, insn);
2631 /* ??? We might be able to handle PARALLELs. Later. */
2632 gcc_assert (GET_CODE (pat) == SET);
2634 src = SET_SRC (pat);
2636 /* Constant propagation. */
2637 if (gcse_constant_p (src))
2639 if (constprop_register (insn, reg_used->reg_rtx, src))
2642 global_const_prop_count++;
2643 if (dump_file != NULL)
2645 fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2646 fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
2647 print_rtl (dump_file, src);
2648 fprintf (dump_file, "\n");
2650 if (INSN_DELETED_P (insn))
2654 else if (REG_P (src)
2655 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2656 && REGNO (src) != regno)
2658 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2661 global_copy_prop_count++;
2662 if (dump_file != NULL)
2664 fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2665 regno, INSN_UID (insn));
2666 fprintf (dump_file, " with reg %d\n", REGNO (src));
2669 /* The original insn setting reg_used may or may not now be
2670 deletable. We leave the deletion to flow. */
2671 /* FIXME: If it turns out that the insn isn't deletable,
2672 then we may have unnecessarily extended register lifetimes
2673 and made things worse. */
2678 if (changed && DEBUG_INSN_P (insn))
2684 /* Like find_used_regs, but avoid recording uses that appear in
2685 input-output contexts such as zero_extract or pre_dec. This
2686 restricts the cases we consider to those for which local cprop
2687 can legitimately make replacements. */
2690 local_cprop_find_used_regs (rtx *xptr, void *data)
2697 switch (GET_CODE (x))
2701 case STRICT_LOW_PART:
2710 /* Can only legitimately appear this early in the context of
2711 stack pushes for function arguments, but handle all of the
2712 codes nonetheless. */
2716 /* Setting a subreg of a register larger than word_mode leaves
2717 the non-written words unchanged. */
2718 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
2726 find_used_regs (xptr, data);
2729 /* Try to perform local const/copy propagation on X in INSN. */
2732 do_local_cprop (rtx x, rtx insn)
2734 rtx newreg = NULL, newcnst = NULL;
2736 /* Rule out USE instructions and ASM statements as we don't want to
2737 change the hard registers mentioned. */
2739 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
2740 || (GET_CODE (PATTERN (insn)) != USE
2741 && asm_noperands (PATTERN (insn)) < 0)))
2743 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0, VOIDmode);
2744 struct elt_loc_list *l;
2748 for (l = val->locs; l; l = l->next)
2750 rtx this_rtx = l->loc;
2753 if (gcse_constant_p (this_rtx))
2755 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
2756 /* Don't copy-propagate if the value has an attached REG_EQUIV note.
2757 At this point only function parameters should have REG_EQUIV
2758 notes, and if the argument slot is used somewhere explicitly,
2759 it means the address of the parameter has been taken, so we
2760 should not extend the lifetime of the pseudo. */
2761 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
2762 || ! MEM_P (XEXP (note, 0))))
2765 if (newcnst && constprop_register (insn, x, newcnst))
2767 if (dump_file != NULL)
2769 fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
2771 fprintf (dump_file, "insn %d with constant ",
2773 print_rtl (dump_file, newcnst);
2774 fprintf (dump_file, "\n");
2776 local_const_prop_count++;
2779 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
2781 if (dump_file != NULL)
2784 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
2785 REGNO (x), INSN_UID (insn));
2786 fprintf (dump_file, " with reg %d\n", REGNO (newreg));
2788 local_copy_prop_count++;
2795 /* Do local const/copy propagation (i.e. within each basic block). */
2798 local_cprop_pass (void)
2802 struct reg_use *reg_used;
2803 bool changed = false;
2808 FOR_BB_INSNS (bb, insn)
2812 rtx note = find_reg_equal_equiv_note (insn);
2816 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
2819 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
2821 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2822 reg_used++, reg_use_count--)
2824 if (do_local_cprop (reg_used->reg_rtx, insn))
2830 if (INSN_DELETED_P (insn))
2833 while (reg_use_count);
2835 cselib_process_insn (insn);
2838 /* Forget everything at the end of a basic block. */
2839 cselib_clear_table ();
2847 /* Similar to get_condition, only the resulting condition must be
2848 valid at JUMP, instead of at EARLIEST.
2850 This differs from noce_get_condition in ifcvt.c in that we prefer not to
2851 settle for the condition variable in the jump instruction being integral.
2852 We prefer to be able to record the value of a user variable, rather than
2853 the value of a temporary used in a condition. This could be solved by
2854 recording the value of *every* register scanned by canonicalize_condition,
2855 but this would require some code reorganization. */
2858 fis_get_condition (rtx jump)
2860 return get_condition (jump, NULL, false, true);
2863 /* Check the comparison COND to see if we can safely form an implicit set from
2864 it. COND is either an EQ or NE comparison. */
2867 implicit_set_cond_p (const_rtx cond)
2869 const enum machine_mode mode = GET_MODE (XEXP (cond, 0));
2870 const_rtx cst = XEXP (cond, 1);
2872 /* We can't perform this optimization if either operand might be or might
2873 contain a signed zero. */
2874 if (HONOR_SIGNED_ZEROS (mode))
2876 /* It is sufficient to check if CST is or contains a zero. We must
2877 handle float, complex, and vector. If any subpart is a zero, then
2878 the optimization can't be performed. */
2879 /* ??? The complex and vector checks are not implemented yet. We just
2880 always return zero for them. */
2881 if (GET_CODE (cst) == CONST_DOUBLE)
2884 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
2885 if (REAL_VALUES_EQUAL (d, dconst0))
2892 return gcse_constant_p (cst);
2895 /* Find the implicit sets of a function. An "implicit set" is a constraint
2896 on the value of a variable, implied by a conditional jump. For example,
2897 following "if (x == 2)", the then branch may be optimized as though the
2898 conditional performed an "explicit set", in this example, "x = 2". This
2899 function records the set patterns that are implicit at the start of each
2902 FIXME: This would be more effective if critical edges were pre-split. As
2903 it is now, we can't record implicit sets for blocks that have
2904 critical successor edges. This results in missed optimizations
2905 and in more (unnecessary) work in cfgcleanup.c:thread_jump(). */
2908 find_implicit_sets (void)
2910 basic_block bb, dest;
2916 /* Check for more than one successor. */
2917 if (EDGE_COUNT (bb->succs) > 1)
2919 cond = fis_get_condition (BB_END (bb));
2922 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
2923 && REG_P (XEXP (cond, 0))
2924 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
2925 && implicit_set_cond_p (cond))
2927 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
2928 : FALLTHRU_EDGE (bb)->dest;
2931 /* Record nothing for a critical edge. */
2932 && single_pred_p (dest)
2933 && dest != EXIT_BLOCK_PTR)
2935 new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
2937 implicit_sets[dest->index] = new_rtx;
2940 fprintf (dump_file, "Implicit set of reg %d in ",
2941 REGNO (XEXP (cond, 0)));
2942 fprintf (dump_file, "basic block %d\n", dest->index);
2950 fprintf (dump_file, "Found %d implicit sets\n", count);
2953 /* Bypass conditional jumps. */
2955 /* The value of last_basic_block at the beginning of the jump_bypass
2956 pass. The use of redirect_edge_and_branch_force may introduce new
2957 basic blocks, but the data flow analysis is only valid for basic
2958 block indices less than bypass_last_basic_block. */
2960 static int bypass_last_basic_block;
2962 /* Find a set of REGNO to a constant that is available at the end of basic
2963 block BB. Returns NULL if no such set is found. Based heavily upon
2964 find_avail_set. */
2966 static struct expr *
2967 find_bypass_set (int regno, int bb)
2969 struct expr *result = 0;
2974 struct expr *set = lookup_set (regno, &set_hash_table);
2978 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
2980 set = next_set (regno, set);
2986 gcc_assert (GET_CODE (set->expr) == SET);
2988 src = SET_SRC (set->expr);
2989 if (gcse_constant_p (src))
2995 regno = REGNO (src);
3001 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3002 any of the instructions inserted on an edge. Jump bypassing places
3003 condition code setters on CFG edges using insert_insn_on_edge. This
3004 function is required to check that our data flow analysis is still
3005 valid prior to commit_edge_insertions. */
3008 reg_killed_on_edge (const_rtx reg, const_edge e)
3012 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3013 if (INSN_P (insn) && reg_set_p (reg, insn))
3019 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3020 basic block BB which has more than one predecessor. If not NULL, SETCC
3021 is the first instruction of BB, which is immediately followed by JUMP_INSN
3022 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3023 Returns nonzero if a change was made.
3025 During the jump bypassing pass, we may place copies of SETCC instructions
3026 on CFG edges. The following routine must be careful to pay attention to
3027 these inserted insns when performing its transformations. */
3030 bypass_block (basic_block bb, rtx setcc, rtx jump)
3035 int may_be_loop_header;
3039 insn = (setcc != NULL) ? setcc : jump;
3041 /* Determine set of register uses in INSN. */
3043 note_uses (&PATTERN (insn), find_used_regs, NULL);
3044 note = find_reg_equal_equiv_note (insn);
3046 find_used_regs (&XEXP (note, 0), NULL);
3048 may_be_loop_header = false;
3049 FOR_EACH_EDGE (e, ei, bb->preds)
3050 if (e->flags & EDGE_DFS_BACK)
3052 may_be_loop_header = true;
3057 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3061 if (e->flags & EDGE_COMPLEX)
3067 /* We can't redirect edges from new basic blocks. */
3068 if (e->src->index >= bypass_last_basic_block)
3074 /* The irreducible loops created by redirecting edges entering the
3075 loop from outside would decrease the effectiveness of some of the
3076 following optimizations, so prevent this. */
3077 if (may_be_loop_header
3078 && !(e->flags & EDGE_DFS_BACK))
3084 for (i = 0; i < reg_use_count; i++)
3086 struct reg_use *reg_used = &reg_use_table[i];
3087 unsigned int regno = REGNO (reg_used->reg_rtx);
3088 basic_block dest, old_dest;
3092 set = find_bypass_set (regno, e->src->index);
3097 /* Check the data flow is valid after edge insertions. */
3098 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3101 src = SET_SRC (pc_set (jump));
3104 src = simplify_replace_rtx (src,
3105 SET_DEST (PATTERN (setcc)),
3106 SET_SRC (PATTERN (setcc)));
3108 new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
3109 SET_SRC (set->expr));
3111 /* Jump bypassing may have already placed instructions on
3112 edges of the CFG. We can't bypass an outgoing edge that
3113 has instructions associated with it, as these insns won't
3114 get executed if the incoming edge is redirected. */
3116 if (new_rtx == pc_rtx)
3118 edest = FALLTHRU_EDGE (bb);
3119 dest = edest->insns.r ? NULL : edest->dest;
3121 else if (GET_CODE (new_rtx) == LABEL_REF)
3123 dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
3124 /* Don't bypass edges containing instructions. */
3125 edest = find_edge (bb, dest);
3126 if (edest && edest->insns.r)
3132 /* Avoid unification of the edge with other edges from original
3133 branch. We would end up emitting the instruction on "both"
3134 edges. */
3136 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3137 && find_edge (e->src, dest))
3143 && dest != EXIT_BLOCK_PTR)
3145 redirect_edge_and_branch_force (e, dest);
3147 /* Copy the register setter to the redirected edge.
3148 Don't copy CC0 setters, as CC0 is dead after jump. */
3151 rtx pat = PATTERN (setcc);
3152 if (!CC0_P (SET_DEST (pat)))
3153 insert_insn_on_edge (copy_insn (pat), e);
3156 if (dump_file != NULL)
3158 fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3159 "in jump_insn %d equals constant ",
3160 regno, INSN_UID (jump));
3161 print_rtl (dump_file, SET_SRC (set->expr));
3162 fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3163 e->src->index, old_dest->index, dest->index);
3176 /* Find basic blocks with more than one predecessor that only contain a
3177 single conditional jump. If the result of the comparison is known at
3178 compile-time from any incoming edge, redirect that edge to the
3179 appropriate target. Returns nonzero if a change was made.
3181 This function is now mis-named, because we also handle indirect jumps. */
3184 bypass_conditional_jumps (void)
3192 /* Note we start at block 1. */
3193 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3196 bypass_last_basic_block = last_basic_block;
3197 mark_dfs_back_edges ();
3200 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3201 EXIT_BLOCK_PTR, next_bb)
3203 /* Check for more than one predecessor. */
3204 if (!single_pred_p (bb))
3207 FOR_BB_INSNS (bb, insn)
3208 if (DEBUG_INSN_P (insn))
3210 else if (NONJUMP_INSN_P (insn))
3214 if (GET_CODE (PATTERN (insn)) != SET)
3217 dest = SET_DEST (PATTERN (insn));
3218 if (REG_P (dest) || CC0_P (dest))
3223 else if (JUMP_P (insn))
3225 if ((any_condjump_p (insn) || computed_jump_p (insn))
3226 && onlyjump_p (insn))
3227 changed |= bypass_block (bb, setcc, insn);
3230 else if (INSN_P (insn))
3235 /* If we bypassed any register setting insns, we inserted a
3236 copy on the redirected edge. These need to be committed. */
3238 commit_edge_insertions ();
3243 /* Compute PRE+LCM working variables. */
3245 /* Local properties of expressions. */
3246 /* Nonzero for expressions that are transparent in the block. */
3247 static sbitmap *transp;
3249 /* Nonzero for expressions that are computed (available) in the block. */
3250 static sbitmap *comp;
3252 /* Nonzero for expressions that are locally anticipatable in the block. */
3253 static sbitmap *antloc;
3255 /* Nonzero for expressions where this block is an optimal computation
3256 point. */
3257 static sbitmap *pre_optimal;
3259 /* Nonzero for expressions which are redundant in a particular block. */
3260 static sbitmap *pre_redundant;
3262 /* Nonzero for expressions which should be inserted on a specific edge. */
3263 static sbitmap *pre_insert_map;
3265 /* Nonzero for expressions which should be deleted in a specific block. */
3266 static sbitmap *pre_delete_map;
3268 /* Contains the edge_list returned by pre_edge_lcm. */
3269 static struct edge_list *edge_list;
3271 /* Allocate vars used for PRE analysis. */
3274 alloc_pre_mem (int n_blocks, int n_exprs)
3276 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3277 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3278 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3281 pre_redundant = NULL;
3282 pre_insert_map = NULL;
3283 pre_delete_map = NULL;
3284 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3286 /* pre_insert and pre_delete are allocated later. */
3289 /* Free vars used for PRE analysis. */
3294 sbitmap_vector_free (transp);
3295 sbitmap_vector_free (comp);
3297 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3300 sbitmap_vector_free (pre_optimal);
3302 sbitmap_vector_free (pre_redundant);
3304 sbitmap_vector_free (pre_insert_map);
3306 sbitmap_vector_free (pre_delete_map);
3308 transp = comp = NULL;
3309 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3312 /* Remove certain expressions from anticipatable and transparent
3313 sets of basic blocks that have incoming abnormal edge.
3314 For PRE remove potentially trapping expressions to avoid placing
3315 them on abnormal edges. For hoisting remove memory references that
3316 can be clobbered by calls. */
3319 prune_expressions (bool pre_p)
3321 sbitmap prune_exprs;
3325 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
3326 sbitmap_zero (prune_exprs);
3327 for (ui = 0; ui < expr_hash_table.size; ui++)
3330 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3332 /* Note potentially trapping expressions. */
3333 if (may_trap_p (e->expr))
3335 SET_BIT (prune_exprs, e->bitmap_index);
3339 if (!pre_p && MEM_P (e->expr))
3340 /* Note memory references that can be clobbered by a call.
3341 We do not split abnormal edges in hoisting, so if a memory
3342 reference were hoisted along an abnormal edge, it would be
3343 placed /before/ the call. Therefore, only constant memory
3344 references can be hoisted along abnormal edges. */
3347 if (GET_CODE (XEXP (e->expr, 0)) == SYMBOL_REF
3348 && CONSTANT_POOL_ADDRESS_P (XEXP (e->expr, 0)))
3351 if (MEM_READONLY_P (e->expr)
3352 && !MEM_VOLATILE_P (e->expr)
3353 && MEM_NOTRAP_P (e->expr))
3354 /* Constant memory reference, e.g., a PIC address. */
3357 /* ??? Optimally, we would use interprocedural alias
3358 analysis to determine if this mem is actually killed
3359 by this call. */
3361 SET_BIT (prune_exprs, e->bitmap_index);
3371 /* If the current block is the destination of an abnormal edge, we
3372 kill all trapping (for PRE) and memory (for hoist) expressions
3373 because we won't be able to properly place the instruction on
3374 the edge. So make them neither anticipatable nor transparent.
3375 This is fairly conservative.
3377 ??? For hoisting it may be necessary to check for set-and-jump
3378 instructions here, not just for abnormal edges. The general problem
3379 is that when an expression cannot be placed right at the end of
3380 a basic block, we should account for any side-effects of a
3381 subsequent jump instruction that could clobber the expression. It would
3382 be best to implement this check along the lines of
3383 hoist_expr_reaches_here_p where the target block is already known
3384 and, hence, there's no need to conservatively prune expressions on
3385 "intermediate" set-and-jump instructions. */
3386 FOR_EACH_EDGE (e, ei, bb->preds)
3387 if ((e->flags & EDGE_ABNORMAL)
3388 && (pre_p || CALL_P (BB_END (e->src))))
3390 sbitmap_difference (antloc[bb->index],
3391 antloc[bb->index], prune_exprs);
3392 sbitmap_difference (transp[bb->index],
3393 transp[bb->index], prune_exprs);
3398 sbitmap_free (prune_exprs);
3401 /* It may be necessary to insert a large number of insns on edges to
3402 make the existing occurrences of expressions fully redundant. This
3403 routine examines the set of insertions and deletions, and if the ratio
3404 of insertions to deletions is too high for a particular expression, then
3405 the expression is removed from the insertion/deletion sets.
3407 N_ELEMS is the number of elements in the hash table. */
3410 prune_insertions_deletions (int n_elems)
3412 sbitmap_iterator sbi;
3413 sbitmap prune_exprs;
3415 /* We always use I to iterate over blocks/edges and J to iterate over
3416 expressions. */
3419 /* Counts for the number of times an expression needs to be inserted and
3420 number of times an expression can be removed as a result. */
3421 int *insertions = GCNEWVEC (int, n_elems);
3422 int *deletions = GCNEWVEC (int, n_elems);
3424 /* Set of expressions which require too many insertions relative to
3425 the number of deletions achieved. We will prune these out of the
3426 insertion/deletion sets. */
3427 prune_exprs = sbitmap_alloc (n_elems);
3428 sbitmap_zero (prune_exprs);
3430 /* Iterate over the edges counting the number of times each expression
3431 needs to be inserted. */
3432 for (i = 0; i < (unsigned) n_edges; i++)
3434 EXECUTE_IF_SET_IN_SBITMAP (pre_insert_map[i], 0, j, sbi)
3438 /* Similarly for deletions, but those occur in blocks rather than on
3439 edges. */
3440 for (i = 0; i < (unsigned) last_basic_block; i++)
3442 EXECUTE_IF_SET_IN_SBITMAP (pre_delete_map[i], 0, j, sbi)
3446 /* Now that we have accurate counts, iterate over the elements in the
3447 hash table and see if any need too many insertions relative to the
3448 number of evaluations that can be removed. If so, mark them in
3449 PRUNE_EXPRS. */
3450 for (j = 0; j < (unsigned) n_elems; j++)
3451 if (deletions[j]
3452 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
3453 SET_BIT (prune_exprs, j);
3455 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
3456 EXECUTE_IF_SET_IN_SBITMAP (prune_exprs, 0, j, sbi)
3458 for (i = 0; i < (unsigned) n_edges; i++)
3459 RESET_BIT (pre_insert_map[i], j);
3461 for (i = 0; i < (unsigned) last_basic_block; i++)
3462 RESET_BIT (pre_delete_map[i], j);
3465 sbitmap_free (prune_exprs);
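/* A worked sketch of the pruning test above (hypothetical counts):
   with a maximum ratio of 20, an expression needing 50 insertions to
   delete 2 occurrences (50 / 2 = 25) is pruned, while one needing 30
   insertions for 2 deletions (30 / 2 = 15) is kept.  */
static int
too_many_insertions_sketch (int insertions, int deletions, unsigned max_ratio)
{
  return deletions != 0 && (unsigned) insertions / deletions > max_ratio;
}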
3470 /* Top level routine to do the dataflow analysis needed by PRE. */
3473 compute_pre_data (void)
3477 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3478 prune_expressions (true);
3479 sbitmap_vector_zero (ae_kill, last_basic_block);
3481 /* Compute ae_kill for each basic block using: ~(TRANSP | COMP). */
3488 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3489 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3492 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3493 ae_kill, &pre_insert_map, &pre_delete_map);
3494 sbitmap_vector_free (antloc);
3496 sbitmap_vector_free (ae_kill);
3499 prune_insertions_deletions (expr_hash_table.n_elems);
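/* A toy-bitset sketch of the kill computation above (unsigned long
   standing in for one sbitmap row): an expression is killed in a block
   unless it is transparent there or computed (available) there, i.e.
   AE_KILL = ~(TRANSP | COMP).  */
static unsigned long
ae_kill_sketch (unsigned long transp_bb, unsigned long comp_bb)
{
  return ~(transp_bb | comp_bb);
}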
3504 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3505 block BB.
3507 VISITED is a pointer to a working buffer for tracking which BB's have
3508 been visited. It is NULL for the top-level call.
3510 We treat reaching expressions that go through blocks containing the same
3511 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3512 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3513 2 as not reaching. The intent is to improve the probability of finding
3514 only one reaching expression and to reduce register lifetimes by picking
3515 the closest such expression. */
3518 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3523 FOR_EACH_EDGE (pred, ei, bb->preds)
3525 basic_block pred_bb = pred->src;
3527 if (pred->src == ENTRY_BLOCK_PTR
3528 /* Has this predecessor already been visited? */
3529 || visited[pred_bb->index])
3530 ;/* Nothing to do. */
3532 /* Does this predecessor generate this expression? */
3533 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3535 /* Is this the occurrence we're looking for?
3536 Note that there's only one generating occurrence per block
3537 so we just need to check the block number. */
3538 if (occr_bb == pred_bb)
3541 visited[pred_bb->index] = 1;
3543 /* Ignore this predecessor if it kills the expression. */
3544 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3545 visited[pred_bb->index] = 1;
3547 /* Neither gen nor kill. */
3550 visited[pred_bb->index] = 1;
3551 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3556 /* All paths have been checked. */
3560 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
3561 memory allocated for that function is returned. */
3564 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3567 char *visited = XCNEWVEC (char, last_basic_block);
3569 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3576 /* Given an expr, generate RTL which we can insert at the end of a BB,
3577 or on an edge. Set the block number of any insns generated to
3578 the value of BB. */
3581 process_insert_insn (struct expr *expr)
3583 rtx reg = expr->reaching_reg;
3584 rtx exp = copy_rtx (expr->expr);
3589 /* If the expression is something that's an operand, like a constant,
3590 just copy it to a register. */
3591 if (general_operand (exp, GET_MODE (reg)))
3592 emit_move_insn (reg, exp);
3594 /* Otherwise, make a new insn to compute this expression and make sure the
3595 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3596 expression to make sure we don't have any sharing issues. */
3599 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3601 if (insn_invalid_p (insn))
3612 /* Add EXPR to the end of basic block BB.
3614 This is used by both PRE and code hoisting. */
3617 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
3619 rtx insn = BB_END (bb);
3621 rtx reg = expr->reaching_reg;
3622 int regno = REGNO (reg);
3625 pat = process_insert_insn (expr);
3626 gcc_assert (pat && INSN_P (pat));
3629 while (NEXT_INSN (pat_end) != NULL_RTX)
3630 pat_end = NEXT_INSN (pat_end);
3632 /* If the last insn is a jump, insert EXPR in front [taking care to
3633 handle cc0, etc. properly]. Similarly we need to take care of
3634 trapping instructions in the presence of non-call exceptions. */
3637 || (NONJUMP_INSN_P (insn)
3638 && (!single_succ_p (bb)
3639 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
3645 /* If this is a jump table, then we can't insert stuff here. Since
3646 we know the previous real insn must be the tablejump, we insert
3647 the new instruction just before the tablejump. */
3648 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
3649 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
3650 insn = prev_active_insn (insn);
3653 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
3654 if cc0 isn't set. */
3655 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
3657 insn = XEXP (note, 0);
3660 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
3661 if (maybe_cc0_setter
3662 && INSN_P (maybe_cc0_setter)
3663 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
3664 insn = maybe_cc0_setter;
3667 /* FIXME: What if something in cc0/jump uses value set in new insn? */
3668 new_insn = emit_insn_before_noloc (pat, insn, bb);
3671 /* Likewise if the last insn is a call, as will happen in the presence
3672 of exception handling. */
3673 else if (CALL_P (insn)
3674 && (!single_succ_p (bb)
3675 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
3677 /* Keeping in mind targets with small register classes and parameters
3678 in registers, we search backward and place the instructions before
3679 the first parameter is loaded. Do this for everyone for consistency
3680 and a presumption that we'll get better code elsewhere as well. */
3682 /* Since different machines initialize their parameter registers
3683 in different orders, assume nothing. Collect the set of all
3684 parameter registers. */
3685 insn = find_first_parameter_load (insn, BB_HEAD (bb));
3687 /* If we found all the parameter loads, then we want to insert
3688 before the first parameter load.
3690 If we did not find all the parameter loads, then we might have
3691 stopped on the head of the block, which could be a CODE_LABEL.
3692 If we inserted before the CODE_LABEL, then we would be putting
3693 the insn in the wrong basic block. In that case, put the insn
3694 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
3695 while (LABEL_P (insn)
3696 || NOTE_INSN_BASIC_BLOCK_P (insn))
3697 insn = NEXT_INSN (insn);
3699 new_insn = emit_insn_before_noloc (pat, insn, bb);
3702 new_insn = emit_insn_after_noloc (pat, insn, bb);
3707 add_label_notes (PATTERN (pat), new_insn);
3710 pat = NEXT_INSN (pat);
3713 gcse_create_count++;
3717 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
3718 bb->index, INSN_UID (new_insn));
3719 fprintf (dump_file, "copying expression %d to reg %d\n",
3720 expr->bitmap_index, regno);
3724 /* Insert partially redundant expressions on edges in the CFG to make
3725 the expressions fully redundant. */
3728 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
3730 int e, i, j, num_edges, set_size, did_insert = 0;
3733 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
3734 if it reaches any of the deleted expressions. */
3736 set_size = pre_insert_map[0]->size;
3737 num_edges = NUM_EDGES (edge_list);
3738 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
3739 sbitmap_vector_zero (inserted, num_edges);
3741 for (e = 0; e < num_edges; e++)
3744 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
3746 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
3748 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
3750 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
3751 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
3753 struct expr *expr = index_map[j];
3756 /* Now look at each deleted occurrence of this expression. */
3757 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3759 if (! occr->deleted_p)
3762 /* Insert this expression on this edge if it would
3763 reach the deleted occurrence in BB. */
3764 if (!TEST_BIT (inserted[e], j))
3767 edge eg = INDEX_EDGE (edge_list, e);
3769 /* We can't insert anything on an abnormal and
3770 critical edge, so we insert the insn at the end of
3771 the previous block. There are several alternatives
3772 detailed in Morgan's book, P277 (sec 10.5), for
3773 handling this situation. This one is easiest for
3774 now. */
3776 if (eg->flags & EDGE_ABNORMAL)
3777 insert_insn_end_basic_block (index_map[j], bb);
3780 insn = process_insert_insn (index_map[j]);
3781 insert_insn_on_edge (insn, eg);
3786 fprintf (dump_file, "PRE: edge (%d,%d), ",
3788 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
3789 fprintf (dump_file, "copy expression %d\n",
3790 expr->bitmap_index);
3793 update_ld_motion_stores (expr);
3794 SET_BIT (inserted[e], j);
3796 gcse_create_count++;
3803 sbitmap_vector_free (inserted);
3807 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
3808 Given "old_reg <- expr" (INSN), instead of adding after it
3809 reaching_reg <- old_reg
3810 it's better to do the following:
3811 reaching_reg <- expr
3812 old_reg <- reaching_reg
3813 because this way copy propagation can discover additional PRE
3814 opportunities. But if this fails, we try the old way.
3815 When "expr" is a store, i.e.
3816 given "MEM <- old_reg", instead of adding after it
3817 reaching_reg <- old_reg
3818 it's better to add it before as follows:
3819 reaching_reg <- old_reg
3820 MEM <- reaching_reg. */
3823 pre_insert_copy_insn (struct expr *expr, rtx insn)
3825 rtx reg = expr->reaching_reg;
3826 int regno = REGNO (reg);
3827 int indx = expr->bitmap_index;
3828 rtx pat = PATTERN (insn);
3829 rtx set, first_set, new_insn;
3833 /* This block matches the logic in hash_scan_insn. */
3834 switch (GET_CODE (pat))
3841 /* Search through the parallel looking for the set whose
3842 source was the expression that we're interested in. */
3843 first_set = NULL_RTX;
3845 for (i = 0; i < XVECLEN (pat, 0); i++)
3847 rtx x = XVECEXP (pat, 0, i);
3848 if (GET_CODE (x) == SET)
3850 /* If the source was a REG_EQUAL or REG_EQUIV note, we
3851 may not find an equivalent expression, but in this
3852 case the PARALLEL will have a single set. */
3853 if (first_set == NULL_RTX)
3855 if (expr_equiv_p (SET_SRC (x), expr->expr))
3863 gcc_assert (first_set);
3864 if (set == NULL_RTX)
3872 if (REG_P (SET_DEST (set)))
3874 old_reg = SET_DEST (set);
3875 /* Check if we can modify the set destination in the original insn. */
3876 if (validate_change (insn, &SET_DEST (set), reg, 0))
3878 new_insn = gen_move_insn (old_reg, reg);
3879 new_insn = emit_insn_after (new_insn, insn);
3883 new_insn = gen_move_insn (reg, old_reg);
3884 new_insn = emit_insn_after (new_insn, insn);
3887 else /* This is possible only in case of a store to memory. */
3889 old_reg = SET_SRC (set);
3890 new_insn = gen_move_insn (reg, old_reg);
3892 /* Check if we can modify the set source in the original insn. */
3893 if (validate_change (insn, &SET_SRC (set), reg, 0))
3894 new_insn = emit_insn_before (new_insn, insn);
3896 new_insn = emit_insn_after (new_insn, insn);
3899 gcse_create_count++;
3903 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
3904 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
3905 INSN_UID (insn), regno);
3908 /* Copy available expressions that reach the redundant expression
3909 to `reaching_reg'. */
3912 pre_insert_copies (void)
3914 unsigned int i, added_copy;
3919 /* For each available expression in the table, copy the result to
3920 `reaching_reg' if the expression reaches a deleted one.
3922 ??? The current algorithm is rather brute force.
3923 Need to do some profiling. */
3925 for (i = 0; i < expr_hash_table.size; i++)
3926 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
3928 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
3929 we don't want to insert a copy here because the expression may not
3930 really be redundant. So only insert an insn if the expression was
3931 deleted. This test also avoids further processing if the
3932 expression wasn't deleted anywhere. */
3933 if (expr->reaching_reg == NULL)
3936 /* Set when we add a copy for that expression. */
3939 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
3941 if (! occr->deleted_p)
3944 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
3946 rtx insn = avail->insn;
3948 /* No need to handle this one if handled already. */
3949 if (avail->copied_p)
3952 /* Don't handle this one if it's a redundant one. */
3953 if (INSN_DELETED_P (insn))
3956 /* Or if the expression doesn't reach the deleted one. */
3957 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
3959 BLOCK_FOR_INSN (occr->insn)))
3964 /* Copy the result of avail to reaching_reg. */
3965 pre_insert_copy_insn (expr, insn);
3966 avail->copied_p = 1;
3971 update_ld_motion_stores (expr);
3975 /* Emit move from SRC to DEST noting the equivalence with the expression
3976 computed in INSN. */
3978 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
3981 rtx set = single_set (insn), set2;
3985 /* This should never fail since we're creating a reg->reg copy
3986 we've verified to be valid. */
3988 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
3990 /* Note the equivalence for local CSE pass. */
3991 set2 = single_set (new_rtx);
3992 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
3994 if ((note = find_reg_equal_equiv_note (insn)))
3995 eqv = XEXP (note, 0);
3997 eqv = SET_SRC (set);
3999 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
4004 /* Delete redundant computations.
4005 Deletion is done by changing the insn to copy the `reaching_reg' of
4006 the expression into the result of the SET. It is left to later passes
4007 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4009 Returns nonzero if a change is made. */
4020 for (i = 0; i < expr_hash_table.size; i++)
4021 for (expr = expr_hash_table.table[i];
4023 expr = expr->next_same_hash)
4025 int indx = expr->bitmap_index;
4027 /* We only need to search antic_occr since we require
4028 ANTLOC != 0. */
4030 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4032 rtx insn = occr->insn;
4034 basic_block bb = BLOCK_FOR_INSN (insn);
4036 /* We only delete insns that have a single_set. */
4037 if (TEST_BIT (pre_delete_map[bb->index], indx)
4038 && (set = single_set (insn)) != 0
4039 && dbg_cnt (pre_insn))
4041 /* Create a pseudo-reg to store the result of reaching
4042 expressions into. Get the mode for the new pseudo from
4043 the mode of the original destination pseudo. */
4044 if (expr->reaching_reg == NULL)
4045 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
4047 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4049 occr->deleted_p = 1;
4056 "PRE: redundant insn %d (expression %d) in ",
4057 INSN_UID (insn), indx);
4058 fprintf (dump_file, "bb %d, reaching reg is %d\n",
4059 bb->index, REGNO (expr->reaching_reg));
4068 /* Perform GCSE optimizations using PRE.
4069 This is called by one_pre_gcse_pass after all the dataflow analysis
4070 has been done.
4072 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4073 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4074 Compiler Design and Implementation.
4076 ??? A new pseudo reg is created to hold the reaching expression. The nice
4077 thing about the classical approach is that it would try to use an existing
4078 reg. If the register can't be adequately optimized [i.e. we introduce
4079 reload problems], one could add a pass here to propagate the new register
4080 through the block.
4082 ??? We don't handle single sets in PARALLELs because we're [currently] not
4083 able to copy the rest of the parallel when we insert copies to create full
4084 redundancies from partial redundancies. However, there's no reason why we
4085 can't handle PARALLELs in the cases where there are no partial
4086 redundancies. */
4092 int did_insert, changed;
4093 struct expr **index_map;
4096 /* Compute a mapping from expression number (`bitmap_index') to
4097 hash table entry. */
4099 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4100 for (i = 0; i < expr_hash_table.size; i++)
4101 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4102 index_map[expr->bitmap_index] = expr;
4104 /* Delete the redundant insns first so that
4105 - we know what register to use for the new insns and for the other
4106 ones with reaching expressions
4107 - we know which insns are redundant when we go to create copies */
4109 changed = pre_delete ();
4110 did_insert = pre_edge_insert (edge_list, index_map);
4112 /* In other places with reaching expressions, copy the expression to the
4113 specially allocated pseudo-reg that reaches the redundant expr. */
4114 pre_insert_copies ();
4117 commit_edge_insertions ();
4125 /* Top level routine to perform one PRE GCSE pass.
4127 Return nonzero if a change was made. */
4130 one_pre_gcse_pass (void)
4134 gcse_subst_count = 0;
4135 gcse_create_count = 0;
4137 /* Return if there's nothing to do, or it is too expensive. */
4138 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
4139 || is_too_expensive (_("PRE disabled")))
4142 /* We need alias. */
4143 init_alias_analysis ();
4146 gcc_obstack_init (&gcse_obstack);
4149 alloc_hash_table (&expr_hash_table, 0);
4150 add_noreturn_fake_exit_edges ();
4152 compute_ld_motion_mems ();
4154 compute_hash_table (&expr_hash_table);
4155 trim_ld_motion_mems ();
4157 dump_hash_table (dump_file, "Expression", &expr_hash_table);
4159 if (expr_hash_table.n_elems > 0)
4161 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4162 compute_pre_data ();
4163 changed |= pre_gcse ();
4164 free_edge_list (edge_list);
4169 remove_fake_exit_edges ();
4170 free_hash_table (&expr_hash_table);
4173 obstack_free (&gcse_obstack, NULL);
4175 /* We are finished with alias. */
4176 end_alias_analysis ();
4180 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
4181 current_function_name (), n_basic_blocks, bytes_used);
4182 fprintf (dump_file, "%d substs, %d insns created\n",
4183 gcse_subst_count, gcse_create_count);
4189 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
4190 to INSN. If such notes are added to an insn which references a
4191 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
4192 that note, because the following loop optimization pass requires
4193 them. */
4195 /* ??? If there was a jump optimization pass after gcse and before loop,
4196 then we would not need to do this here, because jump would add the
4197 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
4200 add_label_notes (rtx x, rtx insn)
4202 enum rtx_code code = GET_CODE (x);
4206 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4208 /* This code used to ignore labels that referred to dispatch tables to
4209 avoid flow generating (slightly) worse code.
4211 We no longer ignore such label references (see LABEL_REF handling in
4212 mark_jump_label for additional information). */
4214 /* There's no reason for current users to emit jump-insns with
4215 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
4216 notes. */
4217 gcc_assert (!JUMP_P (insn));
4218 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
4220 if (LABEL_P (XEXP (x, 0)))
4221 LABEL_NUSES (XEXP (x, 0))++;
4226 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4229 add_label_notes (XEXP (x, i), insn);
4230 else if (fmt[i] == 'E')
4231 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4232 add_label_notes (XVECEXP (x, i, j), insn);
4236 /* Code Hoisting variables and subroutines. */
4238 /* Very busy expressions. */
4239 static sbitmap *hoist_vbein;
4240 static sbitmap *hoist_vbeout;
4242 /* ??? We could compute post dominators and run this algorithm in
4243 reverse to perform tail merging; doing so would probably be
4244 more effective than the tail merging code in jump.c.
4246 It's unclear if tail merging could be run in parallel with
4247 code hoisting. It would be nice. */
4249 /* Allocate vars used for code hoisting analysis. */
4252 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4254 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4255 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4256 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4258 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4259 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4262 /* Free vars used for code hoisting analysis. */
4265 free_code_hoist_mem (void)
4267 sbitmap_vector_free (antloc);
4268 sbitmap_vector_free (transp);
4269 sbitmap_vector_free (comp);
4271 sbitmap_vector_free (hoist_vbein);
4272 sbitmap_vector_free (hoist_vbeout);
4274 free_dominance_info (CDI_DOMINATORS);
4277 /* Compute the very busy expressions at entry/exit from each block.
4279 An expression is very busy if all paths from a given point
4280 compute the expression. */
static void
compute_code_hoist_vbeinout (void)
{
  int changed, passes;
  basic_block bb;

  sbitmap_vector_zero (hoist_vbeout, last_basic_block);
  sbitmap_vector_zero (hoist_vbein, last_basic_block);

  passes = 0;
  changed = 1;

  while (changed)
    {
      changed = 0;

      /* We scan the blocks in the reverse order to speed up
	 the convergence.  */
      FOR_EACH_BB_REVERSE (bb)
	{
	  if (bb->next_bb != EXIT_BLOCK_PTR)
	    {
	      sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
					     hoist_vbein, bb->index);

	      /* Include expressions in VBEout that are calculated
		 in BB and available at its end.  */
	      sbitmap_a_or_b (hoist_vbeout[bb->index],
			      hoist_vbeout[bb->index], comp[bb->index]);
	    }

	  changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
					      antloc[bb->index],
					      hoist_vbeout[bb->index],
					      transp[bb->index]);
	}

      passes++;
    }

  if (dump_file)
    {
      fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);

      FOR_EACH_BB (bb)
	{
	  fprintf (dump_file, "vbein (%d): ", bb->index);
	  dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
	  fprintf (dump_file, "vbeout(%d): ", bb->index);
	  dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
	}
    }
}
/* Top level routine to do the dataflow analysis needed by code hoisting.  */

static void
compute_code_hoist_data (void)
{
  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (false);
  compute_code_hoist_vbeinout ();
  calculate_dominance_info (CDI_DOMINATORS);
  if (dump_file)
    fprintf (dump_file, "\n");
}
/* Determine if the expression identified by EXPR_INDEX would
   reach BB unimpaired if it was placed at the end of EXPR_BB.
   Stop the search if the expression would need to be moved more
   than DISTANCE instructions.

   It's unclear exactly what Muchnick meant by "unimpaired".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   reach.  */
static int
hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb,
			   char *visited, int distance, int *bb_size)
{
  edge pred;
  edge_iterator ei;
  int visited_allocated_locally = 0;

  /* Terminate the search if the distance, for which EXPR is allowed to
     move, is exhausted.  */
  if (distance > 0)
    {
      distance -= bb_size[bb->index];
      if (distance <= 0)
	return 0;
    }
  else
    gcc_assert (distance == 0);

  if (visited == NULL)
    {
      visited_allocated_locally = 1;
      visited = XCNEWVEC (char, last_basic_block);
    }

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR)
	break;
      else if (pred_bb == expr_bb)
	continue;
      else if (visited[pred_bb->index])
	continue;
      else if (! TEST_BIT (transp[pred_bb->index], expr_index))
	break;
      else
	{
	  visited[pred_bb->index] = 1;
	  if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb,
					   visited, distance, bb_size))
	    break;
	}
    }
  if (visited_allocated_locally)
    free (visited);

  return (pred == NULL);
}
/* Find occurrence in BB.  */

static struct occr *
find_occr_in_bb (struct occr *occr, basic_block bb)
{
  /* Find the right occurrence of this expression.  */
  while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
    occr = occr->next;

  return occr;
}
/* Actually perform code hoisting.  */

static int
hoist_code (void)
{
  basic_block bb, dominated;
  VEC (basic_block, heap) *dom_tree_walk;
  unsigned int dom_tree_walk_index;
  VEC (basic_block, heap) *domby;
  unsigned int i, j;
  struct expr **index_map;
  struct expr *expr;
  int *to_bb_head;
  int *bb_size;
  int changed = 0;
  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr != NULL;
	 expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;
  /* Calculate the sizes of basic blocks and note how far
     each instruction is from the start of its block.  We then use this
     data to restrict the distance an expression can travel.  */
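  /* A worked example with hypothetical numbers: if INSN is the third
     nondebug insn of a block containing ten, to_bb_head[INSN_UID (insn)]
     is 2 and bb_size[bb->index] is 10.  An expression hoisted out of that
     block from INSN only travels the 2 insns up to the block start, so
     the distance bookkeeping below credits back the unused 10 - 2 = 8
     insns of that block.  */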
  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block);

  FOR_EACH_BB (bb)
    {
      rtx insn;
      int to_head = 0;

      FOR_BB_INSNS (bb, insn)
	{
	  /* Don't count debug instructions to avoid them affecting
	     decision choices.  */
	  if (NONDEBUG_INSN_P (insn))
	    to_bb_head[INSN_UID (insn)] = to_head++;
	}

      bb_size[bb->index] = to_head;
    }
  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
	      && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
		  == ENTRY_BLOCK_PTR->next_bb));

  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
					    ENTRY_BLOCK_PTR->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions; nothing gets hoisted from the entry block.  */
  FOR_EACH_VEC_ELT (basic_block, dom_tree_walk, dom_tree_walk_index, bb)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);

      if (VEC_length (basic_block, domby) == 0)
	continue;
      /* Examine each expression that is very busy at the exit of this
	 block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
	{
	  if (TEST_BIT (hoist_vbeout[bb->index], i))
	    {
	      /* Current expression.  */
	      struct expr *expr = index_map[i];
	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
	      int hoistable = 0;
	      /* Basic blocks that have occurrences reachable from BB.  */
	      bitmap_head _from_bbs, *from_bbs = &_from_bbs;
	      /* Occurrences reachable from BB.  */
	      VEC (occr_t, heap) *occrs_to_hoist = NULL;
	      /* We want to insert the expression into BB only once, so
		 note when we've inserted it.  */
	      int insn_inserted_p;
	      occr_t occr;

	      bitmap_initialize (from_bbs, 0);
	      /* If an expression is computed in BB and is available at the
		 end of BB, hoist all occurrences dominated by BB to BB.  */
	      if (TEST_BIT (comp[bb->index], i))
		{
		  occr = find_occr_in_bb (expr->antic_occr, bb);

		  if (occr)
		    {
		      /* An occurrence might've been already deleted
			 while processing a dominator of BB.  */
		      if (!occr->deleted_p)
			{
			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
			  hoistable++;
			}
		    }
		  else
		    hoistable++;
		}
	      /* We've found a potentially hoistable expression, now
		 we look at every block BB dominates to see if it
		 computes the expression.  */
	      FOR_EACH_VEC_ELT (basic_block, domby, j, dominated)
		{
		  int max_distance;

		  /* Ignore self dominance.  */
		  if (bb == dominated)
		    continue;
		  /* We've found a dominated block, now see if it computes
		     the busy expression and whether or not moving that
		     expression to the "beginning" of that block is safe.  */
		  if (!TEST_BIT (antloc[dominated->index], i))
		    continue;

		  occr = find_occr_in_bb (expr->antic_occr, dominated);
		  gcc_assert (occr);

		  /* An occurrence might've been already deleted
		     while processing a dominator of BB.  */
		  if (occr->deleted_p)
		    continue;
		  gcc_assert (NONDEBUG_INSN_P (occr->insn));
		  max_distance = expr->max_distance;
		  if (max_distance > 0)
		    /* Adjust MAX_DISTANCE to account for the fact that
		       OCCR won't have to travel all of DOMINATED, but
		       only part of it.  */
		    max_distance += (bb_size[dominated->index]
				     - to_bb_head[INSN_UID (occr->insn)]);

		  /* Note if the expression would reach the dominated block
		     unimpaired if it was placed at the end of BB.

		     Keep track of how many times this expression is hoistable
		     from a dominated block into BB.  */
		  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL,
						 max_distance, bb_size))
		    {
		      hoistable++;
		      VEC_safe_push (occr_t, heap,
				     occrs_to_hoist, occr);
		      bitmap_set_bit (from_bbs, dominated->index);
		    }
		}
	      /* If we found more than one hoistable occurrence of this
		 expression, then note it in the vector of expressions to
		 hoist.  It makes no sense to hoist things which are computed
		 in only one BB, and doing so tends to pessimize register
		 allocation.  One could increase this value to try harder
		 to avoid any possible code expansion due to register
		 allocation issues; however experiments have shown that
		 the vast majority of hoistable expressions are only movable
		 from two successors, so raising this threshold is likely
		 to nullify any benefit we get from code hoisting.  */
	      if (hoistable > 1 && dbg_cnt (hoist_insn))
		{
		  /* If (hoistable != VEC_length), then there is
		     an occurrence of EXPR in BB itself.  Don't waste
		     time looking for the LCA in this case.  */
		  if ((unsigned) hoistable
		      == VEC_length (occr_t, occrs_to_hoist))
		    {
		      basic_block lca;

		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
							      from_bbs);
		      if (lca != bb)
			/* Punt, it's better to hoist these occurrences to
			   the LCA.  */
			VEC_free (occr_t, heap, occrs_to_hoist);
		    }
		}
	      else
		/* Punt, no point hoisting a single occurrence.  */
		VEC_free (occr_t, heap, occrs_to_hoist);
	      insn_inserted_p = 0;

	      /* Walk through the occurrences of the I'th expression we want
		 to hoist to BB and make the transformations.  */
	      FOR_EACH_VEC_ELT (occr_t, occrs_to_hoist, j, occr)
		{
		  rtx insn;
		  rtx set;

		  gcc_assert (!occr->deleted_p);

		  insn = occr->insn;
		  set = single_set (insn);
		  gcc_assert (set);

		  /* Create a pseudo-reg to store the result of reaching
		     expressions into.  Get the mode for the new pseudo
		     from the mode of the original destination pseudo.

		     It is important to use new pseudos whenever we
		     emit a set.  This will allow reload to use
		     rematerialization for such registers.  */
		  if (!insn_inserted_p)
		    expr->reaching_reg
		      = gen_reg_rtx_and_attrs (SET_DEST (set));

		  gcse_emit_move_after (expr->reaching_reg, SET_DEST (set),
					insn);
		  delete_insn (insn);
		  occr->deleted_p = 1;
		  changed = 1;
		  gcse_create_count++;

		  if (!insn_inserted_p)
		    {
		      insert_insn_end_basic_block (expr, bb);
		      insn_inserted_p = 1;
		    }
		}

	      VEC_free (occr_t, heap, occrs_to_hoist);
	      bitmap_clear (from_bbs);
	    }
	}

      VEC_free (basic_block, heap, domby);
    }

  VEC_free (basic_block, heap, dom_tree_walk);
  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}
/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* We need alias analysis.  */
  init_alias_analysis ();

  gcc_obstack_init (&gcse_obstack);

  alloc_hash_table (&expr_hash_table, 0);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  free_hash_table (&expr_hash_table);
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias analysis.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}
/* Here we provide the things required to do store motion towards
   the exit.  In order for this to be effective, gcse also needed to
   be taught how to move a load when it is killed only by a store to
   itself.

	    int i;
	    float a[10];

	    void foo (float scale)
	    {
	      for (i = 0; i < 10; i++)
		a[i] *= scale;
	    }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge-based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
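/* Sketched in source terms (illustrative only, not literal compiler
   output), the loop above effectively becomes:

	    reg = i;
	    for (reg = ...; reg < 10; reg++)
	      a[reg] *= scale;
	    i = reg;

   with the load hoisted above the loop and the store sunk below it.  */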
static hashval_t
pre_ldst_expr_hash (const void *p)
{
  int do_not_record_p = 0;
  const struct ls_expr *const x = (const struct ls_expr *) p;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int
pre_ldst_expr_eq (const void *p1, const void *p2)
{
  const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
    *const ptr2 = (const struct ls_expr *) p2;
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}
/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  void **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
  if (*slot)
    return (struct ls_expr *) *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next = pre_ldst_mems;
  ptr->expr = NULL;
  ptr->pattern = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads = NULL_RTX;
  ptr->stores = NULL_RTX;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid = 0;
  ptr->index = 0;
  ptr->hash_index = hash;
  pre_ldst_mems = ptr;
  *slot = ptr;

  return ptr;
}
/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ldst_mems (void)
{
  if (pre_ldst_table)
    htab_delete (pre_ldst_table);
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}
/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	 Loads : ");

      if (ptr->loads)
	print_rtl (file, ptr->loads);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n	Stores : ");

      if (ptr->stores)
	print_rtl (file, ptr->stores);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}
/* Return the entry for X in the list of ldst only expressions, or
   NULL if X is not in the list.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  void **slot;

  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
  if (!slot || ((struct ls_expr *)*slot)->invalid)
    return NULL;
  return (struct ls_expr *) *slot;
}
/* Return the first item in the list.  */

static inline struct ls_expr *
first_ls_expr (void)
{
  return pre_ldst_mems;
}

/* Return the next item in the list after the specified one.  */

static inline struct ls_expr *
next_ls_expr (struct ls_expr * ptr)
{
  return ptr->next;
}
/* Load Motion for loads which only kill themselves.  */

/* Return true if X is a simple MEM operation, with no registers or
   side effects.  These are the types of loads we consider for the
   ld_motion list; otherwise we let the usual aliasing take care of it.  */
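/* As an illustration (hypothetical RTL, not from a real dump), a load
   such as (mem:SI (symbol_ref:SI ("i"))) qualifies, while a volatile
   MEM, a BLKmode MEM, a reference that may trap under
   -fnon-call-exceptions, or a stack-based access like
   (mem:SI (plus:SI (reg:SI sp) (const_int 8))) does not.  */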
static int
simple_mem (const_rtx x)
{
  if (! MEM_P (x))
    return 0;

  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on the stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias each other.  If we miss a load, we will
   get the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
				pre_ldst_expr_eq, NULL);
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));

		  /* Check for a simple LOAD...  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (REG_P (dest))
			ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
		      else
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

		  /* Check for stores.  Don't worry about aliased ones, they
		     will block any movement we might do later.  We only care
		     about this exact pattern since those are the only
		     circumstances in which we will ignore the aliasing
		     info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);

		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src))
			ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		invalidate_any_buried_refs (PATTERN (insn));
	    }
	}
    }
}
/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct expr *) 0;

      if (expr == NULL)
	{
	  *last = ptr->next;
	  htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = *last;
	}
      else
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}
/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */
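/* In pseudo-RTL (an illustrative sketch), a tracked store

     (set (mem:SI X) SRC)

   becomes the pair

     (set (reg:SI R) SRC)
     (set (mem:SI X) (reg:SI R))

   where R is EXPR->reaching_reg, so R always holds the value that the
   moved loads expect to see.  */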
static void
update_ld_motion_stores (struct expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
	{
	  rtx insn = XEXP (list, 0);
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;
	  rtx copy;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE: store updated with reaching reg ");
	      print_rtl (dump_file, expr->reaching_reg);
	      fprintf (dump_file, ":\n	");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* Un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}
/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, we use something with a more
     graceful degradation.  */
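  /* Illustrative arithmetic (an editorial example, not part of the
     heuristic itself): a function with 1000 basic blocks may have up to
     20000 + 1000 * 4 = 24000 edges, i.e. an average of 24 edges per
     block, before the pass declines to run.  */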
  if (n_edges > 20000 + n_basic_blocks * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks, n_edges / n_basic_blocks);

      return true;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers",
	       pass, n_basic_blocks, max_reg_num ());

      return true;
    }

  return false;
}
/* Main function for the CPROP pass.  */

static int
one_cprop_pass (void)
{
  int changed = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("const/copy propagation disabled")))
    return 0;

  global_const_prop_count = local_const_prop_count = 0;
  global_copy_prop_count = local_copy_prop_count = 0;

  gcc_obstack_init (&gcse_obstack);

  /* Do a local const/copy propagation pass first.  The global pass
     only handles global opportunities.
     If the local pass changes something, remove any unreachable blocks
     because the CPROP global dataflow analysis may get into infinite
     loops for CFGs with unreachable blocks.

     FIXME: This local pass should not be necessary after CSE (but for
	    some reason it still is).  It is also (proven) not necessary
	    to run the local pass right after FWPROP.

     FIXME: The global analysis would not get into infinite loops if it
	    would use the DF solver (via df_simple_dataflow) instead of
	    the solver implemented in this file.  */
  if (local_cprop_pass ())
    {
      delete_unreachable_blocks ();
      df_analyze ();
    }

  /* Determine implicit sets.  */
  implicit_sets = XCNEWVEC (rtx, last_basic_block);
  find_implicit_sets ();

  alloc_hash_table (&set_hash_table, 1);
  compute_hash_table (&set_hash_table);

  /* Free implicit_sets before peak usage.  */
  free (implicit_sets);
  implicit_sets = NULL;

  if (dump_file)
    dump_hash_table (dump_file, "SET", &set_hash_table);
  if (set_hash_table.n_elems > 0)
    {
      basic_block bb;
      rtx insn;

      alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
      compute_cprop_data ();

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR,
		      next_bb)
	{
	  /* Reset tables used to keep track of what's still valid [since
	     the start of the block].  */
	  reset_opr_set_tables ();

	  FOR_BB_INSNS (bb, insn)
	    if (INSN_P (insn))
	      {
		changed |= cprop_insn (insn);

		/* Keep track of everything modified by this insn.  */
		/* ??? Need to be careful w.r.t. mods done to INSN.
		   Don't call mark_oprs_set if we turned the
		   insn into a NOTE.  */
		if (! NOTE_P (insn))
		  mark_oprs_set (insn);
	      }
	}
      changed |= bypass_conditional_jumps ();

      free_cprop_mem ();
    }

  free_hash_table (&set_hash_table);
  obstack_free (&gcse_obstack, NULL);

  if (dump_file)
    {
      fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks, bytes_used);
      fprintf (dump_file, "%d local const props, %d local copy props, ",
	       local_const_prop_count, local_copy_prop_count);
      fprintf (dump_file, "%d global const props, %d global copy props\n\n",
	       global_const_prop_count, global_copy_prop_count);
    }

  return changed;
}
/* All the passes implemented in this file.  Each pass has its
   own gate and execute function, and at the end of the file a
   pass definition for passes.c.

   We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */
static bool
gate_rtl_cprop (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && dbg_cnt (cprop);
}

static unsigned int
execute_rtl_cprop (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_set_flags (DF_LR_RUN_DCE);
  df_analyze ();
  changed = one_cprop_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static bool
gate_rtl_pre (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    && optimize_function_for_speed_p (cfun)
    && dbg_cnt (pre);
}

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static bool
gate_rtl_hoist (void)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}
struct rtl_opt_pass pass_rtl_cprop =
{
 {
  RTL_PASS,
  "cprop",                              /* name */
  gate_rtl_cprop,                       /* gate */
  execute_rtl_cprop,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_CPROP,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};
struct rtl_opt_pass pass_rtl_pre =
{
 {
  RTL_PASS,
  "rtl pre",                            /* name */
  gate_rtl_pre,                         /* gate */
  execute_rtl_pre,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_PRE,                               /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};
struct rtl_opt_pass pass_rtl_hoist =
{
 {
  RTL_PASS,
  "hoist",                              /* name */
  gate_rtl_hoist,                       /* gate */
  execute_rtl_hoist,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_HOIST,                             /* tv_id */
  PROP_cfglayout,                       /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow | TODO_ggc_collect   /* todo_flags_finish */
 }
};
#include "gt-gcse.h"