/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
   - ability to realloc sbitmap vectors would allow one initial computation
     of reg_set_in_block with only subsequent additions, rather than
     recomputing it for each pass

*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "config.h"
#include "system.h"
#include "toplev.h"

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "expr.h"
#include "ggc.h"
#include "params.h"

#include "obstack.h"
#define obstack_chunk_alloc gmalloc
#define obstack_chunk_free free
/* Propagate flow information through back edges and thus enable PRE's
   moving loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
#define FOLLOW_BACK_EDGES 1
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute basic block information.

   2) Compute table of places where registers are set.

   3) Perform copy/constant propagation.

   4) Perform global cse.

   5) Perform another pass of copy/constant propagation.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.
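
   For example, a typical candidate insn is

       (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))

   whereas sets of hard registers or of memory are not entered in the
   expression hash table.  (This sketch is illustrative; want_to_gcse_p
   gives the precise conditions.)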

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
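
   As a small illustrative example (C-level pseudocode, not the RTL this
   file actually operates on):

       before:                        after PRE:

       if (cond)                      if (cond)
         x = a + b;                     { t = a + b; x = t; }
       else                           else
         other ();                      { other (); t = a + b; }
       y = a + b;                     y = t;

   The computation of a + b into Y is partially redundant.  Deleting it and
   inserting a computation of T on the path where a + b was not available
   makes it fully redundant; T stands for the new pseudo-reg of step 5.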

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much because it computes the reaching defs of
   each register in each block and thus can try to use an existing register.

   **********************

   A fair bit of simplicity is gained by creating small functions for simple
   tasks, even when the function is only called in one place.  This may
   measurably slow things down [or may not] by creating more function call
   overhead than is necessary.  The source is laid out so that it's trivial
   to make the affected functions inline so that one can measure what speed
   up, if any, can be achieved, and maybe later when things settle down this
   can be rearranged.

   Help stamp out big monolithic functions!  */
/* GCSE global vars.  */

/* -dG dump file.  */
static FILE *gcse_file;

/* Note whether or not we should run jump optimization after gcse.  We
   want to do this for two cases.

    * If we changed any jumps via cprop.

    * If we added any labels via edge splitting.  */

static int run_jump_opt_after_gcse;

/* Bitmaps are normally not included in debugging dumps.
   However it's useful to be able to print them from GDB.
   We could create special functions for this, but it's simpler to
   just allow passing stderr to the dump_foo fns.  Since stderr can
   be a macro, we store a copy here.  */
static FILE *debug_stderr;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Non-zero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy_p[(int) NUM_MACHINE_MODES];

/* Non-zero if can_copy_p has been initialized.  */
static int can_copy_init_p;

/* The dataflow analyzer.  */
struct df *df_analyzer;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Non-zero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Non-zero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

/* Total size of the expression hash table, in elements.  */
static unsigned int expr_hash_table_size;

/* The table itself.
   This is an array of `expr_hash_table_size' elements.  */
static struct expr **expr_hash_table;

/* Total size of the copy propagation hash table, in elements.  */
static unsigned int set_hash_table_size;

/* The table itself.
   This is an array of `set_hash_table_size' elements.  */
static struct expr **set_hash_table;
/* Mapping of uids to cuids.
   Only real insns get cuids.  */
static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */
#ifdef ENABLE_CHECKING
#define INSN_CUID(INSN) \
  (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif

/* Number of cuids.  */
static int max_cuid;

/* Mapping of cuids to insns.  */
static rtx *cuid_insn;

/* Get insn from cuid.  */
#define CUID_INSN(CUID) (cuid_insn[CUID])
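
/* For example, if a function's insns have UIDs 3, 7, 8 and 12, where UID 8
   is a note (not a real insn), the real insns get CUIDs 0, 1 and 2, and
   CUID_INSN (1) maps back to the insn with UID 7.  */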
/* Maximum register number in function prior to doing gcse + 1.
   Registers created during this pass have regno >= max_gcse_regno.
   This is named with "gcse" to not collide with global of same name.  */
static unsigned int max_gcse_regno;

/* Maximum number of cse-able expressions found.  */
static int n_exprs;

/* Maximum number of assignments for copy propagation found.  */
static int n_sets;
/* Table of registers that are modified.

   For each register, each element is a list of places where the pseudo-reg
   is set.

   For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
   requires knowledge of which blocks kill which regs [and thus could use
   a bitmap instead of the lists `reg_set_table' uses].

   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
   advantage of recording things this way is that `reg_set_table' is fairly
   sparse with respect to pseudo regs but for hard regs could be fairly dense
   [relatively speaking].  And recording sets of pseudo-regs in lists speeds
   up functions like compute_transp since in the case of pseudo-regs we only
   need to iterate over the number of times a pseudo-reg is set, not over the
   number of basic blocks [clearly there is a bit of a slow down in the cases
   where a pseudo is set more than once in a block, however it is believed
   that the net effect is to speed things up].  This isn't done for hard-regs
   because recording call-clobbered hard-regs in `reg_set_table' at each
   function call can consume a fair bit of memory, and iterating over
   hard-regs stored this way in compute_transp will be more expensive.  */
typedef struct reg_set
{
  /* The next setting of this register.  */
  struct reg_set *next;
  /* The insn where it was set.  */
  rtx insn;
} reg_set;

static reg_set **reg_set_table;

/* Size of `reg_set_table'.
   The table starts out at max_gcse_regno + slop, and is enlarged as
   necessary.  */
static int reg_set_table_size;

/* Amount to grow `reg_set_table' by when it's full.  */
#define REG_SET_TABLE_SLOP 100
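
/* As an illustrative sketch (not a function in this file), every place
   pseudo-reg REGNO is set can be visited with:

     struct reg_set *r;

     for (r = reg_set_table[REGNO]; r != NULL; r = r->next)
       ... BLOCK_NUM (r->insn) is a block that sets REGNO ...  */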
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself. (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be movable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;           /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx loads;                    /* INSN list of where a load appears.  */
  rtx stores;                   /* INSN list of where a store appears.  */
  struct ls_expr * next;        /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  int hash_index;               /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;
/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static sbitmap reg_set_bitmap;

/* For each block, a bitmap of registers set in the block.
   This is used by expr_killed_p and compute_transp.
   It is computed during hash table computation and not by compute_sets
   as it includes registers added since the last pass (or between cprop and
   gcse) and it's currently not easy to realloc sbitmap vectors.  */
static sbitmap *reg_set_in_block;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of constants propagated.  */
static int const_prop_count;
/* Number of copies propagated.  */
static int copy_prop_count;
/* These variables are used by classic GCSE.
   Normally they'd be defined a bit later, but `rd_gen' needs to
   be declared sooner.  */

/* Each block has a bitmap of each type.
   The length of each block's bitmap is:

       max_cuid  - for reaching definitions
       n_exprs   - for available expressions

   Thus we view the bitmaps as 2 dimensional arrays.  i.e.
   rd_kill[block_num][cuid_num]
   ae_kill[block_num][expr_num]  */

/* For reaching defs.  */
static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;

/* For available exprs.  */
static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
/* Objects of this type are passed around by the null-pointer check
   routines.  */
struct null_pointer_info
{
  /* The basic block being processed.  */
  basic_block current_block;
  /* The first register to be handled in this pass.  */
  unsigned int min_reg;
  /* One greater than the last register to be handled in this pass.  */
  unsigned int max_reg;
  sbitmap *nonnull_local;
  sbitmap *nonnull_killed;
};
static void compute_can_copy PARAMS ((void));
static char *gmalloc PARAMS ((unsigned int));
static char *grealloc PARAMS ((char *, unsigned int));
static char *gcse_alloc PARAMS ((unsigned long));
static void alloc_gcse_mem PARAMS ((rtx));
static void free_gcse_mem PARAMS ((void));
static void alloc_reg_set_mem PARAMS ((int));
static void free_reg_set_mem PARAMS ((void));
static int get_bitmap_width PARAMS ((int, int, int));
static void record_one_set PARAMS ((int, rtx));
static void record_set_info PARAMS ((rtx, rtx, void *));
static void compute_sets PARAMS ((rtx));
static void hash_scan_insn PARAMS ((rtx, int, int));
static void hash_scan_set PARAMS ((rtx, rtx, int));
static void hash_scan_clobber PARAMS ((rtx, rtx));
static void hash_scan_call PARAMS ((rtx, rtx));
static int want_to_gcse_p PARAMS ((rtx));
static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
static int oprs_anticipatable_p PARAMS ((rtx, rtx));
static int oprs_available_p PARAMS ((rtx, rtx));
static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
					  int, int));
static void insert_set_in_table PARAMS ((rtx, rtx));
static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
static unsigned int hash_string_1 PARAMS ((const char *));
static unsigned int hash_set PARAMS ((int, int));
static int expr_equiv_p PARAMS ((rtx, rtx));
static void record_last_reg_set_info PARAMS ((rtx, int));
static void record_last_mem_set_info PARAMS ((rtx));
static void record_last_set_info PARAMS ((rtx, rtx, void *));
static void compute_hash_table PARAMS ((int));
static void alloc_set_hash_table PARAMS ((int));
static void free_set_hash_table PARAMS ((void));
static void compute_set_hash_table PARAMS ((void));
static void alloc_expr_hash_table PARAMS ((unsigned int));
static void free_expr_hash_table PARAMS ((void));
static void compute_expr_hash_table PARAMS ((void));
static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
				     int, int));
static struct expr *lookup_expr PARAMS ((rtx));
static struct expr *lookup_set PARAMS ((unsigned int, rtx));
static struct expr *next_set PARAMS ((unsigned int, struct expr *));
static void reset_opr_set_tables PARAMS ((void));
static int oprs_not_set_p PARAMS ((rtx, rtx));
static void mark_call PARAMS ((rtx));
static void mark_set PARAMS ((rtx, rtx));
static void mark_clobber PARAMS ((rtx, rtx));
static void mark_oprs_set PARAMS ((rtx));
static void alloc_cprop_mem PARAMS ((int, int));
static void free_cprop_mem PARAMS ((void));
static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
static void compute_transpout PARAMS ((void));
static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
					      int));
static void compute_cprop_data PARAMS ((void));
static void find_used_regs PARAMS ((rtx *, void *));
static int try_replace_reg PARAMS ((rtx, rtx, rtx));
static struct expr *find_avail_set PARAMS ((int, rtx));
static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx));
#ifdef HAVE_cc0
static int cprop_cc0_jump PARAMS ((basic_block, rtx, struct reg_use *, rtx));
#endif
static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
static void canon_list_insert PARAMS ((rtx, rtx, void *));
static int cprop_insn PARAMS ((basic_block, rtx, int));
static int cprop PARAMS ((int));
static int one_cprop_pass PARAMS ((int, int));
static void alloc_pre_mem PARAMS ((int, int));
static void free_pre_mem PARAMS ((void));
static void compute_pre_data PARAMS ((void));
static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
					    basic_block));
static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
static void pre_insert_copies PARAMS ((void));
static int pre_delete PARAMS ((void));
static int pre_gcse PARAMS ((void));
static int one_pre_gcse_pass PARAMS ((int));
static void add_label_notes PARAMS ((rtx, rtx));
static void alloc_code_hoist_mem PARAMS ((int, int));
static void free_code_hoist_mem PARAMS ((void));
static void compute_code_hoist_vbeinout PARAMS ((void));
static void compute_code_hoist_data PARAMS ((void));
static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
					      char *));
static void hoist_code PARAMS ((void));
static int one_code_hoisting_pass PARAMS ((void));
static void alloc_rd_mem PARAMS ((int, int));
static void free_rd_mem PARAMS ((void));
static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
static void compute_kill_rd PARAMS ((void));
static void compute_rd PARAMS ((void));
static void alloc_avail_expr_mem PARAMS ((int, int));
static void free_avail_expr_mem PARAMS ((void));
static void compute_ae_gen PARAMS ((void));
static int expr_killed_p PARAMS ((rtx, basic_block));
static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
					basic_block, int));
static rtx computing_insn PARAMS ((struct expr *, rtx));
static int def_reaches_here_p PARAMS ((rtx, rtx));
static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
static int handle_avail_expr PARAMS ((rtx, struct expr *));
static int classic_gcse PARAMS ((void));
static int one_classic_gcse_pass PARAMS ((int));
static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
static void delete_null_pointer_checks_1 PARAMS ((varray_type *, unsigned int *,
						  sbitmap *, sbitmap *,
						  struct null_pointer_info *));
static rtx process_insert_insn PARAMS ((struct expr *));
static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
					     basic_block, int, char *));
static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
						 basic_block, char *));
static struct ls_expr * ldst_entry PARAMS ((rtx));
static void free_ldst_entry PARAMS ((struct ls_expr *));
static void free_ldst_mems PARAMS ((void));
static void print_ldst_list PARAMS ((FILE *));
static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
static int enumerate_ldsts PARAMS ((void));
static inline struct ls_expr * first_ls_expr PARAMS ((void));
static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
static int simple_mem PARAMS ((rtx));
static void invalidate_any_buried_refs PARAMS ((rtx));
static void compute_ld_motion_mems PARAMS ((void));
static void trim_ld_motion_mems PARAMS ((void));
static void update_ld_motion_stores PARAMS ((struct expr *));
static int store_ops_ok PARAMS ((rtx, basic_block, rtx, int));
static void find_moveable_store PARAMS ((rtx));
static int compute_store_table PARAMS ((void));
static int load_kills_store PARAMS ((rtx, rtx));
static int find_loads PARAMS ((rtx, rtx));
static int store_killed_in_insn PARAMS ((rtx, rtx));
static int store_killed_after PARAMS ((rtx, rtx, basic_block, int));
static int store_killed_before PARAMS ((rtx, rtx, basic_block));
static void build_store_vectors PARAMS ((void));
static void insert_insn_start_bb PARAMS ((rtx, basic_block));
static int insert_store PARAMS ((struct ls_expr *, edge));
static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
static void delete_store PARAMS ((struct ls_expr *, basic_block));
static void free_store_memory PARAMS ((void));
static void store_motion PARAMS ((void));
/* Entry point for global common subexpression elimination.
   F is the first instruction in the function.  */

int
gcse_main (f, file)
     rtx f;
     FILE *file;
{
  int changed, pass;
  /* Bytes used at start of pass.  */
  int initial_bytes_used;
  /* Maximum number of bytes used by a pass.  */
  int max_pass_bytes;
  /* Point to release obstack data from for each pass.  */
  char *gcse_obstack_bottom;

  /* Insertion of instructions on edges can create new basic blocks; we
     need the original basic block count so that we can properly deallocate
     arrays sized on the number of basic blocks originally in the cfg.  */
  int orig_bb_count;

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* Assume that we do not need to run jump optimizations after gcse.  */
  run_jump_opt_after_gcse = 0;

  /* For calling dump_foo fns from gdb.  */
  debug_stderr = stderr;
  gcse_file = file;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  if (file)
    dump_flow_info (file);

  orig_bb_count = n_basic_blocks;
  /* Return if there's nothing to do.  */
  if (n_basic_blocks <= 1)
    return 0;

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many edges
     as blocks.  But we do not want to punish small functions which have
     a couple switch statements.  So we require a relatively large number
     of basic blocks and the ratio of edges to blocks to be high.  */
  if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
    {
      if (warn_disabled_optimization)
	warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
		 n_basic_blocks, n_edges / n_basic_blocks);
      return 0;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_gcse_regno)
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      if (warn_disabled_optimization)
	warning ("GCSE disabled: %d basic blocks and %d registers",
		 n_basic_blocks, max_gcse_regno);

      return 0;
    }

  /* See what modes support reg/reg copy operations.  */
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = 1;
    }

  gcc_obstack_init (&gcse_obstack);
  bytes_used = 0;

  /* We need alias.  */
  init_alias_analysis ();
  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);
  compute_sets (f);

  pass = 0;
  initial_bytes_used = bytes_used;
  max_pass_bytes = 0;
  gcse_obstack_bottom = gcse_alloc (1);
  changed = 1;
  while (changed && pass < MAX_GCSE_PASSES)
    {
      changed = 0;
      if (file)
	fprintf (file, "GCSE pass %d\n\n", pass + 1);

      /* Initialize bytes_used to the space for the pred/succ lists,
	 and the reg_set_table data.  */
      bytes_used = initial_bytes_used;

      /* Each pass may create new registers, so recalculate each time.  */
      max_gcse_regno = max_reg_num ();

      alloc_gcse_mem (f);

      /* Don't allow constant propagation to modify jumps
	 during this pass.  */
      changed = one_cprop_pass (pass + 1, 0);

      if (optimize_size)
	changed |= one_classic_gcse_pass (pass + 1);
      else
	{
	  changed |= one_pre_gcse_pass (pass + 1);
	  /* We may have just created new basic blocks.  Release and
	     recompute various things which are sized on the number of
	     basic blocks.  */
	  if (changed)
	    {
	      int i;

	      for (i = 0; i < orig_bb_count; i++)
		{
		  if (modify_mem_list[i])
		    free_INSN_LIST_list (modify_mem_list + i);
		  if (canon_modify_mem_list[i])
		    free_INSN_LIST_list (canon_modify_mem_list + i);
		}
	      modify_mem_list
		= (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
	      canon_modify_mem_list
		= (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
	      memset ((char *) modify_mem_list, 0,
		      n_basic_blocks * sizeof (rtx *));
	      memset ((char *) canon_modify_mem_list, 0,
		      n_basic_blocks * sizeof (rtx *));
	      orig_bb_count = n_basic_blocks;
	    }
	  free_reg_set_mem ();
	  alloc_reg_set_mem (max_reg_num ());
	  compute_sets (f);
	  run_jump_opt_after_gcse = 1;
	}

      if (max_pass_bytes < bytes_used)
	max_pass_bytes = bytes_used;

      /* Free up memory, then reallocate for code hoisting.  We can
	 not re-use the existing allocated memory because the tables
	 will not have info for the insns or registers created by
	 partial redundancy elimination.  */
      free_gcse_mem ();

      /* It does not make sense to run code hoisting unless we are optimizing
	 for code size -- it rarely makes programs faster, and can make
	 them bigger if we did partial redundancy elimination (when optimizing
	 for space, we use a classic gcse algorithm instead of partial
	 redundancy algorithms).  */
      if (optimize_size)
	{
	  max_gcse_regno = max_reg_num ();
	  alloc_gcse_mem (f);
	  changed |= one_code_hoisting_pass ();
	  free_gcse_mem ();

	  if (max_pass_bytes < bytes_used)
	    max_pass_bytes = bytes_used;
	}

      if (file)
	{
	  fprintf (file, "\n");
	  fflush (file);
	}

      obstack_free (&gcse_obstack, gcse_obstack_bottom);
      pass++;
    }

  /* Do one last pass of copy propagation, including cprop into
     conditional jumps.  */

  max_gcse_regno = max_reg_num ();
  alloc_gcse_mem (f);

  /* This time, go ahead and allow cprop to alter jumps.  */
  one_cprop_pass (pass + 1, 1);
  free_gcse_mem ();

  if (file)
    {
      fprintf (file, "GCSE of %s: %d basic blocks, ",
	       current_function_name, n_basic_blocks);
      fprintf (file, "%d pass%s, %d bytes\n\n",
	       pass, pass > 1 ? "es" : "", max_pass_bytes);
    }

  obstack_free (&gcse_obstack, NULL);
  free_reg_set_mem ();

  /* We are finished with alias.  */
  end_alias_analysis ();
  allocate_reg_info (max_reg_num (), FALSE, FALSE);

  if (!optimize_size && flag_gcse_sm)
    store_motion ();

  return run_jump_opt_after_gcse;
}
/* Misc. utilities.  */

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy ()
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy_p, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy_p[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy_p[i] = 1;
#endif
      }
    else
      can_copy_p[i] = 1;

  end_sequence ();
}
/* Cover function to xmalloc to record bytes allocated.  */

static char *
gmalloc (size)
     unsigned int size;
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xrealloc.
   We don't record the additional size since we don't know it.
   It won't affect memory usage stats much anyway.  */

static char *
grealloc (ptr, size)
     char *ptr;
     unsigned int size;
{
  return xrealloc (ptr, size);
}

/* Cover function to obstack_alloc.
   We don't need to record the bytes allocated here since
   obstack_chunk_alloc is set to gmalloc.  */

static char *
gcse_alloc (size)
     unsigned long size;
{
  return (char *) obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the cuid mapping array,
   and reg/memory set tracking tables.

   This is called at the start of each pass.  */

static void
alloc_gcse_mem (f)
     rtx f;
{
  int i, n;
  rtx insn;

  /* Find the largest UID and create a mapping from UIDs to CUIDs.
     CUIDs are like UIDs except they increase monotonically, have no gaps,
     and only apply to real insns.  */

  max_uid = get_max_uid ();
  n = (max_uid + 1) * sizeof (int);
  uid_cuid = (int *) gmalloc (n);
  memset ((char *) uid_cuid, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	uid_cuid[INSN_UID (insn)] = i++;
      else
	uid_cuid[INSN_UID (insn)] = i;
    }

  /* Create a table mapping cuids to insns.  */

  max_cuid = i;
  n = (max_cuid + 1) * sizeof (rtx);
  cuid_insn = (rtx *) gmalloc (n);
  memset ((char *) cuid_insn, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      CUID_INSN (i++) = insn;

  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = (sbitmap) sbitmap_alloc (max_gcse_regno);

  /* Allocate vars to track sets of regs, memory per block.  */
  reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
						       max_gcse_regno);
  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
  canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
  memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
  memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
}
/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem ()
{
  free (uid_cuid);
  free (cuid_insn);

  free (reg_set_bitmap);

  sbitmap_vector_free (reg_set_in_block);
  /* Release any INSN_LIST nodes we have allocated.  */
  {
    int i;

    for (i = 0; i < n_basic_blocks; i++)
      {
	if (modify_mem_list[i])
	  free_INSN_LIST_list (modify_mem_list + i);
	if (canon_modify_mem_list[i])
	  free_INSN_LIST_list (canon_modify_mem_list + i);
      }

    free (modify_mem_list);
    free (canon_modify_mem_list);
    modify_mem_list = 0;
    canon_modify_mem_list = 0;
  }
}
/* Many of the global optimization algorithms work by solving dataflow
   equations for various expressions.  Initially, some local value is
   computed for each expression in each block.  Then, the values across the
   various blocks are combined (by following flow graph edges) to arrive at
   global values.  Conceptually, each set of equations is independent.  We
   may therefore solve all the equations in parallel, solve them one at a
   time, or pick any intermediate approach.

   When you're going to need N two-dimensional bitmaps, each X (say, the
   number of blocks) by Y (say, the number of expressions), call this
   function.  It's not important what X and Y represent; only that Y
   correspond to the things that can be done in parallel.  This function will
   return an appropriate chunking factor C; you should solve C sets of
   equations in parallel.  By going through this function, we can easily
   trade space against time; by solving fewer equations in parallel we use
   less space.  */

static int
get_bitmap_width (n, x, y)
     int n;
     int x;
     int y;
{
  /* It's not really worth figuring out *exactly* how much memory will
     be used by a particular choice.  The important thing is to get
     something approximately right.  */
  size_t max_bitmap_memory = 10 * 1024 * 1024;

  /* The number of bytes we'd use for a single column of minimum
     width.  */
  size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);

  /* Often, it's reasonable just to solve all the equations in
     parallel.  */
  if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
    return y;

  /* Otherwise, pick the largest width we can, without going over the
     limit.  */
  return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
			     / column_size);
}
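
/* For example, a caller needing four N_BASIC_BLOCKS x N_EXPRS bitmap
   vectors might chunk the work roughly like this (an illustrative sketch;
   `solve_equations_in_range' is a hypothetical helper, not a function in
   this file):

     int width = get_bitmap_width (4, n_basic_blocks, n_exprs);
     int start;

     for (start = 0; start < n_exprs; start += width)
       solve_equations_in_range (start, MIN (start + width, n_exprs));  */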
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   SETP controls which hash table to look at.  If zero, this routine looks at
   the expr hash table; if nonzero this routine looks at the set hash table.
   Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (transp, comp, antloc, setp)
     sbitmap *transp;
     sbitmap *comp;
     sbitmap *antloc;
     int setp;
{
  unsigned int i, hash_table_size;
  struct expr **hash_table;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (setp)
	sbitmap_vector_zero (transp, n_basic_blocks);
      else
	sbitmap_vector_ones (transp, n_basic_blocks);
    }

  if (comp)
    sbitmap_vector_zero (comp, n_basic_blocks);
  if (antloc)
    sbitmap_vector_zero (antloc, n_basic_blocks);

  /* We use the same code for cprop, pre and hoisting.  For cprop
     we care about the set hash table, for pre and hoisting we
     care about the expr hash table.  */
  hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
  hash_table = setp ? set_hash_table : expr_hash_table;

  for (i = 0; i < hash_table_size; i++)
    {
      struct expr *expr;

      for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp, setp);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to non-zero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to non-zero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
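
/* As a small worked example of the local properties: in a block

       r5 = r1 + r2
       r1 = ...
       r6 = r1 + r2

   the expression (plus r1 r2) is locally anticipatable (the first
   occurrence precedes any modification of r1 or r2), locally available
   (the last occurrence follows the final modification of its operands),
   but not transparent (r1 is set within the block).  */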
/* Register set information.

   `reg_set_table' records where each register is set or otherwise
   modified.  */

static struct obstack reg_set_obstack;

static void
alloc_reg_set_mem (n_regs)
     int n_regs;
{
  int n;

  reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
  n = reg_set_table_size * sizeof (struct reg_set *);
  reg_set_table = (struct reg_set **) gmalloc (n);
  memset ((char *) reg_set_table, 0, n);

  gcc_obstack_init (&reg_set_obstack);
}

static void
free_reg_set_mem ()
{
  free (reg_set_table);
  obstack_free (&reg_set_obstack, NULL);
}
/* Record REGNO in the reg_set table.  */

static void
record_one_set (regno, insn)
     int regno;
     rtx insn;
{
  /* Allocate a new reg_set element and link it onto the list.  */
  struct reg_set *new_reg_info;

  /* If the table isn't big enough, enlarge it.  */
  if (regno >= reg_set_table_size)
    {
      int new_size = regno + REG_SET_TABLE_SLOP;

      reg_set_table
	= (struct reg_set **) grealloc ((char *) reg_set_table,
					new_size * sizeof (struct reg_set *));
      memset ((char *) (reg_set_table + reg_set_table_size), 0,
	      (new_size - reg_set_table_size) * sizeof (struct reg_set *));
      reg_set_table_size = new_size;
    }

  new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
						   sizeof (struct reg_set));
  bytes_used += sizeof (struct reg_set);
  new_reg_info->insn = insn;
  new_reg_info->next = reg_set_table[regno];
  reg_set_table[regno] = new_reg_info;
}
/* Called from compute_sets via note_stores to handle one SET or CLOBBER in
   an insn.  The DATA is really the instruction in which the SET is
   occurring.  */

static void
record_set_info (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data;
{
  rtx record_set_insn = (rtx) data;

  if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
    record_one_set (REGNO (dest), record_set_insn);
}

/* Scan the function and record each set of each pseudo-register.

   This is called once, at the start of the gcse pass.  See the comments for
   `reg_set_table' for further documentation.  */

static void
compute_sets (f)
     rtx f;
{
  rtx insn;

  for (insn = f; insn != 0; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      note_stores (PATTERN (insn), record_set_info, insn);
}
/* Hash table support.  */

/* For each register, the cuid of the first/last insn in the block to set it,
   or -1 if not set.  */
#define NEVER_SET -1
static int *reg_first_set;
static int *reg_last_set;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (x)
     rtx x;
{
  static rtx test_insn = 0;
  int num_clobbers = 0;
  int icode;

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CALL:
      return 0;

    default:
      break;
    }

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
      ggc_add_rtx_root (&test_insn, 1);
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
	  && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}
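
/* For instance, under these rules an expression such as
   (plus:SI (reg:SI 101) (const_int 4)) is a candidate, while a bare
   (reg:SI 101) or (const_int 4) is rejected up front: copying such a
   value to yet another register gains nothing.  */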
/* Return non-zero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (x, insn, avail_p)
     rtx x, insn;
     int avail_p;
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (avail_p)
	return (reg_last_set[REGNO (x)] == NEVER_SET
		|| reg_last_set[REGNO (x)] < INSN_CUID (insn));
      else
	return (reg_first_set[REGNO (x)] == NEVER_SET
		|| reg_first_set[REGNO (x)] >= INSN_CUID (insn));

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn), INSN_CUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Non-zero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a non-zero value.  */

static void
mems_conflict_for_gcse_p (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;
{
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == SIGN_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (GET_CODE (dest) != MEM)
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
	gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
		       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return non-zero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the CUID in UID_LIMIT.
   AVAIL_P is non-zero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to non-zero.  */

static int
load_killed_in_block_p (bb, uid_limit, x, avail_p)
     basic_block bb;
     int uid_limit;
     rtx x;
     int avail_p;
{
  rtx list_entry = modify_mem_list[bb->index];

  while (list_entry)
    {
      rtx setter;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
	  || (! avail_p
	      && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
	{
	  list_entry = XEXP (list_entry, 1);
	  continue;
	}

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (GET_CODE (setter) == CALL_INSN)
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.

	 The note_stores interface is pretty limited, so we have to
	 communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
	return 1;

      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
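
/* For example, to ask whether the load X is killed anywhere in block BB,
   the convention documented above is (illustrative only):

     if (load_killed_in_block_p (bb, max_uid + 1, x, 1))
       ... X must not be treated as transparent in BB ...  */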
/* Return non-zero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return non-zero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.

   ??? One might want to merge this with canon_hash.  Later.  */

static unsigned int
hash_expr (x, mode, do_not_record_p, hash_table_size)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
     int hash_table_size;
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_expr_1 (x, mode, do_not_record_p);
  return hash % hash_table_size;
}
/* Hash a string.  Just add its bytes up.  */

static inline unsigned
hash_string_1 (ps)
     const char *ps;
{
  unsigned hash = 0;
  const unsigned char *p = (const unsigned char *) ps;

  if (p)
    while (*p)
      hash += *p++;

  return hash;
}
/* Subroutine of hash_expr to do the actual work.  */

static unsigned int
hash_expr_1 (x, mode, do_not_record_p)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
{
  int i, j;
  unsigned hash = 0;
  enum rtx_code code;
  const char *fmt;

  /* Used to turn recursion into iteration.  We can't rely on GCC's
     tail-recursion elimination since we need to keep accumulating values
     in HASH.  */

  if (x == 0)
    return hash;

 repeat:
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      hash += ((unsigned int) REG << 7) + REGNO (x);
      return hash;

    case CONST_INT:
      hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
	       + (unsigned int) INTVAL (x));
      return hash;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
	for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
	  hash += (unsigned int) XWINT (x, i);
      else
	hash += ((unsigned int) CONST_DOUBLE_LOW (x)
		 + (unsigned int) CONST_DOUBLE_HIGH (x));
      return hash;

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
	 differences and differences between each stage's debugging dumps.  */
      hash += (((unsigned int) LABEL_REF << 7)
	       + CODE_LABEL_NUMBER (XEXP (x, 0)));
      return hash;

    case SYMBOL_REF:
      {
	/* Don't hash on the symbol's address to avoid bootstrap differences.
	   Different hash values may cause expressions to be recorded in
	   different orders and thus different registers to be used in the
	   final assembler.  This also avoids differences in the dump files
	   between various stages.  */
	unsigned int h = 0;
	const unsigned char *p = (const unsigned char *) XSTR (x, 0);

	while (*p)
	  h += (h << 7) + *p++; /* ??? revisit */

	hash += ((unsigned int) SYMBOL_REF << 7) + h;
	return hash;
      }

    case MEM:
      if (MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}

      hash += (unsigned int) MEM;
      hash += MEM_ALIAS_SET (x);
      x = XEXP (x, 0);
      goto repeat;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      *do_not_record_p = 1;
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      else
	{
	  /* We don't want to take the filename and line into account.  */
	  hash += (unsigned) code + (unsigned) GET_MODE (x)
	    + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
	    + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
	    + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);

	  if (ASM_OPERANDS_INPUT_LENGTH (x))
	    {
	      for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
		hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
				      GET_MODE (ASM_OPERANDS_INPUT (x, i)),
				      do_not_record_p)
			 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
					  (x, i)));

	      hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
	      x = ASM_OPERANDS_INPUT (x, 0);
	      mode = GET_MODE (x);
	      goto repeat;
	    }

	  return hash;
	}

    default:
      break;
    }

  hash += (unsigned) code + (unsigned) GET_MODE (x);
  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
	  if (*do_not_record_p)
	    return 0;
	}

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  {
	    hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
	    if (*do_not_record_p)
	      return 0;
	  }

      else if (fmt[i] == 's')
	hash += hash_string_1 (XSTR (x, i));
      else if (fmt[i] == 'i')
	hash += (unsigned int) XINT (x, i);
      else if (fmt[i] == 't')
	; /* Ignore these operands.  */
      else
	abort ();
    }

  return hash;
}
/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (regno, hash_table_size)
     int regno;
     int hash_table_size;
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}
/* Return non-zero if exp1 is equivalent to exp2.
   ??? Borrowed from cse.c.  Might want to remerge with cse.c.  Later.  */

static int
expr_equiv_p (x, y)
     rtx x, y;
{
  register int i, j;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;

  if (x == 0 || y == 0)
    return x == y;

  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  switch (code)
    {
    case PC:
    case CC0:
      return x == y;

    case CONST_INT:
      return INTVAL (x) == INTVAL (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case REG:
      return REGNO (x) == REGNO (y);

    case MEM:
      /* Can't merge two expressions in different alias sets, since we can
	 decide that the expression is transparent in a block when it isn't,
	 due to it being set with the different alias set.  */
      if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	return 0;
      break;

    /* For commutative operations, check both orders.  */
    case PLUS:
    case MULT:
    case AND:
    case IOR:
    case XOR:
    case NE:
    case EQ:
      return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
	       && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
	      || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
		  && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));

    case ASM_OPERANDS:
      /* We don't use the generic code below because we want to
	 disregard filename and line numbers.  */

      /* A volatile asm isn't equivalent to any other.  */
      if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	return 0;

      if (GET_MODE (x) != GET_MODE (y)
	  || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
	  || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
		     ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
	  || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
	  || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
	return 0;

      if (ASM_OPERANDS_INPUT_LENGTH (x))
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
				ASM_OPERANDS_INPUT (y, i))
		|| strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
			   ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
	      return 0;
	}

      break;

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
	    return 0;
	  break;

	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
	      return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case '0':
	  break;

	default:
	  abort ();
	}
    }

  return 1;
}
1928 /* Insert expression X in INSN in the hash table.
1929 If it is already present, record it as the last occurrence in INSN's
1932 MODE is the mode of the value X is being stored into.
1933 It is only used if X is a CONST_INT.
1935 ANTIC_P is non-zero if X is an anticipatable expression.
1936 AVAIL_P is non-zero if X is an available expression. */
1939 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1941 enum machine_mode mode;
1943 int antic_p, avail_p;
1945 int found, do_not_record_p;
1947 struct expr *cur_expr, *last_expr = NULL;
1948 struct occr *antic_occr, *avail_occr;
1949 struct occr *last_occr = NULL;
1951 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1953 /* Do not insert expression in table if it contains volatile operands,
1954 or if hash_expr determines the expression is something we don't want
1955 to or can't handle. */
1956 if (do_not_record_p)
1959 cur_expr = expr_hash_table[hash];
1962 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1964 /* If the expression isn't found, save a pointer to the end of
1966 last_expr = cur_expr;
1967 cur_expr = cur_expr->next_same_hash;
1972 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1973 bytes_used += sizeof (struct expr);
1974 if (expr_hash_table[hash] == NULL)
1975 /* This is the first pattern that hashed to this index. */
1976 expr_hash_table[hash] = cur_expr;
1978 /* Add EXPR to end of this hash chain. */
1979 last_expr->next_same_hash = cur_expr;
1981 /* Set the fields of the expr element. */
1983 cur_expr->bitmap_index = n_exprs++;
1984 cur_expr->next_same_hash = NULL;
1985 cur_expr->antic_occr = NULL;
1986 cur_expr->avail_occr = NULL;
1989 /* Now record the occurrence(s). */
1992 antic_occr = cur_expr->antic_occr;
1994 /* Search for another occurrence in the same basic block. */
1995 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1997 /* If an occurrence isn't found, save a pointer to the end of
1999 last_occr = antic_occr;
2000 antic_occr = antic_occr->next;
2004 /* Found another instance of the expression in the same basic block.
2005 Prefer the currently recorded one. We want the first one in the
2006 block and the block is scanned from start to end. */
2007 ; /* nothing to do */
2010 /* First occurrence of this expression in this basic block. */
2011 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2012 bytes_used += sizeof (struct occr);
2013 /* First occurrence of this expression in any block? */
2014 if (cur_expr->antic_occr == NULL)
2015 cur_expr->antic_occr = antic_occr;
2017 last_occr->next = antic_occr;
2019 antic_occr->insn = insn;
2020 antic_occr->next = NULL;
2026 avail_occr = cur_expr->avail_occr;
2028 /* Search for another occurrence in the same basic block. */
2029 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2031 /* If an occurrence isn't found, save a pointer to the end of the list. */
2033 last_occr = avail_occr;
2034 avail_occr = avail_occr->next;
2038 /* Found another instance of the expression in the same basic block.
2039 Prefer this occurrence to the currently recorded one. We want
2040 the last one in the block and the block is scanned from start to end. */
2042 avail_occr->insn = insn;
2045 /* First occurrence of this expression in this basic block. */
2046 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2047 bytes_used += sizeof (struct occr);
2049 /* First occurrence of this expression in any block? */
2050 if (cur_expr->avail_occr == NULL)
2051 cur_expr->avail_occr = avail_occr;
2053 last_occr->next = avail_occr;
2055 avail_occr->insn = insn;
2056 avail_occr->next = NULL;
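/* To illustrate (a sketch, not from a real compilation): if an
   expression occurs at insns 10 and 14 of the same block, the
   antic_occr list keeps insn 10 (the first occurrence in the block,
   for anticipatability) while the avail_occr list keeps insn 14 (the
   last occurrence, for availability), one node per block in each
   list. */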
2061 /* Insert pattern X in INSN in the hash table.
2062 X is a SET of a reg to either another reg or a constant.
2063 If it is already present, record it as the last occurrence in INSN's basic block. */
2067 insert_set_in_table (x, insn)
2073 struct expr *cur_expr, *last_expr = NULL;
2074 struct occr *cur_occr, *last_occr = NULL;
2076 if (GET_CODE (x) != SET
2077 || GET_CODE (SET_DEST (x)) != REG)
2080 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2082 cur_expr = set_hash_table[hash];
2085 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2087 /* If the expression isn't found, save a pointer to the end of the list. */
2089 last_expr = cur_expr;
2090 cur_expr = cur_expr->next_same_hash;
2095 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2096 bytes_used += sizeof (struct expr);
2097 if (set_hash_table[hash] == NULL)
2098 /* This is the first pattern that hashed to this index. */
2099 set_hash_table[hash] = cur_expr;
2101 /* Add EXPR to end of this hash chain. */
2102 last_expr->next_same_hash = cur_expr;
2104 /* Set the fields of the expr element.
2105 We must copy X because it can be modified when copy propagation is
2106 performed on its operands. */
2107 cur_expr->expr = copy_rtx (x);
2108 cur_expr->bitmap_index = n_sets++;
2109 cur_expr->next_same_hash = NULL;
2110 cur_expr->antic_occr = NULL;
2111 cur_expr->avail_occr = NULL;
2114 /* Now record the occurrence. */
2115 cur_occr = cur_expr->avail_occr;
2117 /* Search for another occurrence in the same basic block. */
2118 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2120 /* If an occurrence isn't found, save a pointer to the end of the list. */
2122 last_occr = cur_occr;
2123 cur_occr = cur_occr->next;
2127 /* Found another instance of the expression in the same basic block.
2128 Prefer this occurrence to the currently recorded one. We want the
2129 last one in the block and the block is scanned from start to end. */
2130 cur_occr->insn = insn;
2133 /* First occurrence of this expression in this basic block. */
2134 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2135 bytes_used += sizeof (struct occr);
2137 /* First occurrence of this expression in any block? */
2138 if (cur_expr->avail_occr == NULL)
2139 cur_expr->avail_occr = cur_occr;
2141 last_occr->next = cur_occr;
2143 cur_occr->insn = insn;
2144 cur_occr->next = NULL;
2148 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
2149 non-zero, this is for the assignment hash table, otherwise it is for the
2150 expression hash table. */
2153 hash_scan_set (pat, insn, set_p)
2157 rtx src = SET_SRC (pat);
2158 rtx dest = SET_DEST (pat);
2161 if (GET_CODE (src) == CALL)
2162 hash_scan_call (src, insn);
2164 else if (GET_CODE (dest) == REG)
2166 unsigned int regno = REGNO (dest);
2169 /* If this is a single set and we are doing constant propagation,
2170 see if a REG_NOTE shows this to be equivalent to a constant. */
2171 if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2172 && CONSTANT_P (XEXP (note, 0)))
2173 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2175 /* Only record sets of pseudo-regs in the hash table. */
2177 && regno >= FIRST_PSEUDO_REGISTER
2178 /* Don't GCSE something if we can't do a reg/reg copy. */
2179 && can_copy_p [GET_MODE (dest)]
2180 /* Is SET_SRC something we want to gcse? */
2181 && want_to_gcse_p (src)
2182 /* Don't CSE a nop. */
2183 && ! set_noop_p (pat)
2184 /* Don't GCSE if it has attached REG_EQUIV note.
2185 At this point only function parameters should have
2186 REG_EQUIV notes, and if the argument slot is used somewhere
2187 explicitly, it means the address of the parameter has been taken,
2188 so we should not extend the lifetime of the pseudo. */
2189 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2190 || GET_CODE (XEXP (note, 0)) != MEM))
2192 /* An expression is not anticipatable if its operands are
2193 modified before this insn or if this is not the only SET in this insn. */
2195 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2196 /* An expression is not available if its operands are
2197 subsequently modified, including this insn. */
2198 int avail_p = oprs_available_p (src, insn);
2200 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2203 /* Record sets for constant/copy propagation. */
2205 && regno >= FIRST_PSEUDO_REGISTER
2206 && ((GET_CODE (src) == REG
2207 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2208 && can_copy_p [GET_MODE (dest)]
2209 && REGNO (src) != regno)
2210 || GET_CODE (src) == CONST_INT
2211 || GET_CODE (src) == SYMBOL_REF
2212 || GET_CODE (src) == CONST_DOUBLE)
2213 /* A copy is not available if its src or dest is subsequently
2214 modified. Here we want to search from INSN+1 on, but
2215 oprs_available_p searches from INSN on. */
2216 && (insn == BLOCK_END (BLOCK_NUM (insn))
2217 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2218 && oprs_available_p (pat, tmp))))
2219 insert_set_in_table (pat, insn);
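/* For example (an illustrative sketch, not from any particular test
   case): given

      (set (reg 110) (plus:SI (reg 108) (reg 109)))

   the PLUS expression is entered in the expression hash table, while

      (set (reg 111) (const_int 42))
      (set (reg 112) (reg 111))

   are entered in the set hash table for constant/copy propagation,
   assuming the availability tests above hold. */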
2224 hash_scan_clobber (x, insn)
2225 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2227 /* Currently nothing to do. */
2231 hash_scan_call (x, insn)
2232 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2234 /* Currently nothing to do. */
2237 /* Process INSN and add hash table entries as appropriate.
2239 Only available expressions that set a single pseudo-reg are recorded.
2241 Single sets in a PARALLEL could be handled, but it's an extra complication
2242 that isn't dealt with right now. The trick is handling the CLOBBERs that
2243 are also in the PARALLEL. Later.
2245 If SET_P is non-zero, this is for the assignment hash table,
2246 otherwise it is for the expression hash table.
2247 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block, and should
2248 not record any expressions. */
2251 hash_scan_insn (insn, set_p, in_libcall_block)
2254 int in_libcall_block;
2256 rtx pat = PATTERN (insn);
2259 if (in_libcall_block)
2262 /* Pick out the sets of INSN and for other forms of instructions record
2263 what's been modified. */
2265 if (GET_CODE (pat) == SET)
2266 hash_scan_set (pat, insn, set_p);
2267 else if (GET_CODE (pat) == PARALLEL)
2268 for (i = 0; i < XVECLEN (pat, 0); i++)
2270 rtx x = XVECEXP (pat, 0, i);
2272 if (GET_CODE (x) == SET)
2273 hash_scan_set (x, insn, set_p);
2274 else if (GET_CODE (x) == CLOBBER)
2275 hash_scan_clobber (x, insn);
2276 else if (GET_CODE (x) == CALL)
2277 hash_scan_call (x, insn);
2280 else if (GET_CODE (pat) == CLOBBER)
2281 hash_scan_clobber (pat, insn);
2282 else if (GET_CODE (pat) == CALL)
2283 hash_scan_call (pat, insn);
2287 dump_hash_table (file, name, table, table_size, total_size)
2290 struct expr **table;
2291 int table_size, total_size;
2294 /* Flattened out table, so it's printed in proper order. */
2295 struct expr **flat_table;
2296 unsigned int *hash_val;
2300 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2301 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2303 for (i = 0; i < table_size; i++)
2304 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2306 flat_table[expr->bitmap_index] = expr;
2307 hash_val[expr->bitmap_index] = i;
2310 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2311 name, table_size, total_size);
2313 for (i = 0; i < total_size; i++)
2314 if (flat_table[i] != 0)
2316 expr = flat_table[i];
2317 fprintf (file, "Index %d (hash value %d)\n ",
2318 expr->bitmap_index, hash_val[i]);
2319 print_rtl (file, expr->expr);
2320 fprintf (file, "\n");
2323 fprintf (file, "\n");
2329 /* Record register first/last/block set information for REGNO in INSN.
2331 reg_first_set records the first place in the block where the register
2332 is set and is used to compute "anticipatability".
2334 reg_last_set records the last place in the block where the register
2335 is set and is used to compute "availability".
2337 reg_set_in_block records whether the register is set in the block
2338 and is used to compute "transparency". */
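/* For instance (illustrative only), in a block whose insns have
   CUIDs 10..12:

      (10) (set (reg 105) ...)
      (11) ... a use of (reg 105) ...
      (12) (set (reg 105) ...)

   we end up with reg_first_set[105] == 10, reg_last_set[105] == 12,
   and bit 105 set in this block's reg_set_in_block bitmap. */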
2341 record_last_reg_set_info (insn, regno)
2345 if (reg_first_set[regno] == NEVER_SET)
2346 reg_first_set[regno] = INSN_CUID (insn);
2348 reg_last_set[regno] = INSN_CUID (insn);
2349 SET_BIT (reg_set_in_block[BLOCK_NUM (insn)], regno);
2353 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2354 Note we store a pair of elements in the list, so they have to be
2355 taken off pairwise. */
2358 canon_list_insert (dest, unused1, v_insn)
2359 rtx dest ATTRIBUTE_UNUSED;
2360 rtx unused1 ATTRIBUTE_UNUSED;
2363 rtx dest_addr, insn;
2365 while (GET_CODE (dest) == SUBREG
2366 || GET_CODE (dest) == ZERO_EXTRACT
2367 || GET_CODE (dest) == SIGN_EXTRACT
2368 || GET_CODE (dest) == STRICT_LOW_PART)
2369 dest = XEXP (dest, 0);
2371 /* If DEST is not a MEM, then it will not conflict with a load. Note
2372 that function calls are assumed to clobber memory, but are handled elsewhere. */
2375 if (GET_CODE (dest) != MEM)
2378 dest_addr = get_addr (XEXP (dest, 0));
2379 dest_addr = canon_rtx (dest_addr);
2380 insn = (rtx) v_insn;
2382 canon_modify_mem_list[BLOCK_NUM (insn)] =
2383 alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
2384 canon_modify_mem_list[BLOCK_NUM (insn)] =
2385 alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
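/* The resulting list is therefore walked pairwise: each pair is the
   canonicalized MEM followed by its canonicalized address, most
   recent store first, i.e. dest2, dest_addr2, dest1, dest_addr1.
   compute_transp below relies on this layout when testing
   dependence. */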
2388 /* Record memory modification information for INSN. We do not actually care
2389 about the memory location(s) that are set, or even how they are set (consider
2390 a CALL_INSN). We merely need to record which insns modify memory. */
2393 record_last_mem_set_info (insn)
2396 /* load_killed_in_block_p will handle the case of calls clobbering everything. */
2398 modify_mem_list[BLOCK_NUM (insn)] =
2399 alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
2401 if (GET_CODE (insn) == CALL_INSN)
2403 /* Note that traversals of this loop (other than for free-ing)
2404 will break after encountering a CALL_INSN. So, there's no
2405 need to insert a pair of items, as canon_list_insert does. */
2406 canon_modify_mem_list[BLOCK_NUM (insn)] =
2407 alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
2410 note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
2413 /* Called from compute_hash_table via note_stores to handle one
2414 SET or CLOBBER in an insn. DATA is really the instruction in which
2415 the SET is taking place. */
2418 record_last_set_info (dest, setter, data)
2419 rtx dest, setter ATTRIBUTE_UNUSED;
2422 rtx last_set_insn = (rtx) data;
2424 if (GET_CODE (dest) == SUBREG)
2425 dest = SUBREG_REG (dest);
2427 if (GET_CODE (dest) == REG)
2428 record_last_reg_set_info (last_set_insn, REGNO (dest));
2429 else if (GET_CODE (dest) == MEM
2430 /* Ignore pushes, they clobber nothing. */
2431 && ! push_operand (dest, GET_MODE (dest)))
2432 record_last_mem_set_info (last_set_insn);
2435 /* Top level function to create an expression or assignment hash table.
2437 Expression entries are placed in the hash table if
2438 - they are of the form (set (pseudo-reg) src),
2439 - src is something we want to perform GCSE on,
2440 - none of the operands are subsequently modified in the block
2442 Assignment entries are placed in the hash table if
2443 - they are of the form (set (pseudo-reg) src),
2444 - src is something we want to perform const/copy propagation on,
2445 - none of the operands or target are subsequently modified in the block
2447 Currently src must be a pseudo-reg or a const_int.
2449 F is the first insn.
2450 SET_P is non-zero for computing the assignment hash table. */
2453 compute_hash_table (set_p)
2458 /* While we compute the hash table we also compute a bit array of which
2459 registers are set in which blocks.
2460 ??? This isn't needed during const/copy propagation, but it's cheap to compute. Later.
2462 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2464 /* Re-cache any INSN_LIST nodes we have allocated. */
2467 for (i = 0; i < n_basic_blocks; i++)
2469 if (modify_mem_list[i])
2470 free_INSN_LIST_list (modify_mem_list + i);
2471 if (canon_modify_mem_list[i])
2472 free_INSN_LIST_list (canon_modify_mem_list + i);
2475 /* Some working arrays used to track first and last set in each block. */
2476 /* ??? One could use alloca here, but at some size a threshold is crossed
2477 beyond which one should use malloc. Are we at that threshold here? */
2478 reg_first_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2479 reg_last_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2481 for (bb = 0; bb < n_basic_blocks; bb++)
2485 int in_libcall_block;
2488 /* First pass over the instructions records information used to
2489 determine when registers and memory are first and last set.
2490 ??? hard-reg reg_set_in_block computation
2491 could be moved to compute_sets since they currently don't change. */
2493 for (i = 0; i < max_gcse_regno; i++)
2494 reg_first_set[i] = reg_last_set[i] = NEVER_SET;
2497 for (insn = BLOCK_HEAD (bb);
2498 insn && insn != NEXT_INSN (BLOCK_END (bb));
2499 insn = NEXT_INSN (insn))
2501 #ifdef NON_SAVING_SETJMP
2502 if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
2503 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
2505 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2506 record_last_reg_set_info (insn, regno);
2511 if (! INSN_P (insn))
2514 if (GET_CODE (insn) == CALL_INSN)
2516 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2517 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2518 record_last_reg_set_info (insn, regno);
2520 if (! CONST_CALL_P (insn))
2521 record_last_mem_set_info (insn);
2524 note_stores (PATTERN (insn), record_last_set_info, insn);
2527 /* The next pass builds the hash table. */
2529 for (insn = BLOCK_HEAD (bb), in_libcall_block = 0;
2530 insn && insn != NEXT_INSN (BLOCK_END (bb));
2531 insn = NEXT_INSN (insn))
2534 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2535 in_libcall_block = 1;
2536 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
2537 in_libcall_block = 0;
2538 hash_scan_insn (insn, set_p, in_libcall_block);
2542 free (reg_first_set);
2543 free (reg_last_set);
2545 /* Catch bugs early. */
2546 reg_first_set = reg_last_set = 0;
2549 /* Allocate space for the set hash table.
2550 N_INSNS is the number of instructions in the function.
2551 It is used to determine the number of buckets to use. */
2554 alloc_set_hash_table (n_insns)
2559 set_hash_table_size = n_insns / 4;
2560 if (set_hash_table_size < 11)
2561 set_hash_table_size = 11;
2563 /* Attempt to maintain efficient use of hash table.
2564 Making it an odd number is simplest for now.
2565 ??? Later take some measurements. */
2566 set_hash_table_size |= 1;
2567 n = set_hash_table_size * sizeof (struct expr *);
2568 set_hash_table = (struct expr **) gmalloc (n);
2571 /* Free things allocated by alloc_set_hash_table. */
2574 free_set_hash_table ()
2576 free (set_hash_table);
2579 /* Compute the hash table for doing copy/const propagation. */
2582 compute_set_hash_table ()
2584 /* Initialize count of number of entries in hash table. */
2586 memset ((char *) set_hash_table, 0,
2587 set_hash_table_size * sizeof (struct expr *));
2589 compute_hash_table (1);
2592 /* Allocate space for the expression hash table.
2593 N_INSNS is the number of instructions in the function.
2594 It is used to determine the number of buckets to use. */
2597 alloc_expr_hash_table (n_insns)
2598 unsigned int n_insns;
2602 expr_hash_table_size = n_insns / 2;
2603 /* Make sure the amount is usable. */
2604 if (expr_hash_table_size < 11)
2605 expr_hash_table_size = 11;
2607 /* Attempt to maintain efficient use of hash table.
2608 Making it an odd number is simplest for now.
2609 ??? Later take some measurements. */
2610 expr_hash_table_size |= 1;
2611 n = expr_hash_table_size * sizeof (struct expr *);
2612 expr_hash_table = (struct expr **) gmalloc (n);
2615 /* Free things allocated by alloc_expr_hash_table. */
2618 free_expr_hash_table ()
2620 free (expr_hash_table);
2623 /* Compute the hash table for doing GCSE. */
2626 compute_expr_hash_table ()
2628 /* Initialize count of number of entries in hash table. */
2630 memset ((char *) expr_hash_table, 0,
2631 expr_hash_table_size * sizeof (struct expr *));
2633 compute_hash_table (0);
2636 /* Expression tracking support. */
2638 /* Lookup pattern PAT in the expression table.
2639 The result is a pointer to the table entry, or NULL if not found. */
2641 static struct expr *
2645 int do_not_record_p;
2646 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2647 expr_hash_table_size);
2650 if (do_not_record_p)
2653 expr = expr_hash_table[hash];
2655 while (expr && ! expr_equiv_p (expr->expr, pat))
2656 expr = expr->next_same_hash;
2661 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2662 matches it, otherwise return the first entry for REGNO. The result is a
2663 pointer to the table entry, or NULL if not found. */
2665 static struct expr *
2666 lookup_set (regno, pat)
2670 unsigned int hash = hash_set (regno, set_hash_table_size);
2673 expr = set_hash_table[hash];
2677 while (expr && ! expr_equiv_p (expr->expr, pat))
2678 expr = expr->next_same_hash;
2682 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2683 expr = expr->next_same_hash;
2689 /* Return the next entry for REGNO in list EXPR. */
2691 static struct expr *
2692 next_set (regno, expr)
2697 expr = expr->next_same_hash;
2698 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
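/* Typical usage of this pair of routines (an illustrative sketch):
   iterate over all recorded sets of REGNO with

      struct expr *set;
      for (set = lookup_set (regno, NULL_RTX);
	   set != NULL;
	   set = next_set (regno, set))
	...;

   find_avail_set below uses essentially this pattern. */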
2703 /* Reset tables used to keep track of what's still available [since the
2704 start of the block]. */
2707 reset_opr_set_tables ()
2709 /* Maintain a bitmap of which regs have been set since beginning of the block. */
2711 sbitmap_zero (reg_set_bitmap);
2713 /* Also keep a record of the last instruction to modify memory.
2714 For now this is very trivial, we only record whether any memory
2715 location has been modified. */
2719 /* Re-cache any INSN_LIST nodes we have allocated. */
2720 for (i = 0; i < n_basic_blocks; i++)
2722 if (modify_mem_list[i])
2723 free_INSN_LIST_list (modify_mem_list + i);
2724 if (canon_modify_mem_list[i])
2725 free_INSN_LIST_list (canon_modify_mem_list + i);
2730 /* Return non-zero if the operands of X are not set before INSN in
2731 INSN's basic block. */
2734 oprs_not_set_p (x, insn)
2744 code = GET_CODE (x);
2759 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2760 INSN_CUID (insn), x, 0))
2763 return oprs_not_set_p (XEXP (x, 0), insn);
2766 return ! TEST_BIT (reg_set_bitmap, REGNO (x));
2772 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2776 /* If we are about to do the last recursive call
2777 needed at this level, change it into iteration.
2778 This function is called enough to be worth it. */
2780 return oprs_not_set_p (XEXP (x, i), insn);
2782 if (! oprs_not_set_p (XEXP (x, i), insn))
2785 else if (fmt[i] == 'E')
2786 for (j = 0; j < XVECLEN (x, i); j++)
2787 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2794 /* Mark things set by a CALL. */
2800 if (! CONST_CALL_P (insn))
2801 record_last_mem_set_info (insn);
2804 /* Mark things set by a SET. */
2807 mark_set (pat, insn)
2810 rtx dest = SET_DEST (pat);
2812 while (GET_CODE (dest) == SUBREG
2813 || GET_CODE (dest) == ZERO_EXTRACT
2814 || GET_CODE (dest) == SIGN_EXTRACT
2815 || GET_CODE (dest) == STRICT_LOW_PART)
2816 dest = XEXP (dest, 0);
2818 if (GET_CODE (dest) == REG)
2819 SET_BIT (reg_set_bitmap, REGNO (dest));
2820 else if (GET_CODE (dest) == MEM)
2821 record_last_mem_set_info (insn);
2823 if (GET_CODE (SET_SRC (pat)) == CALL)
2827 /* Record things set by a CLOBBER. */
2830 mark_clobber (pat, insn)
2833 rtx clob = XEXP (pat, 0);
2835 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2836 clob = XEXP (clob, 0);
2838 if (GET_CODE (clob) == REG)
2839 SET_BIT (reg_set_bitmap, REGNO (clob));
2841 record_last_mem_set_info (insn);
2844 /* Record things set by INSN.
2845 This data is used by oprs_not_set_p. */
2848 mark_oprs_set (insn)
2851 rtx pat = PATTERN (insn);
2854 if (GET_CODE (pat) == SET)
2855 mark_set (pat, insn);
2856 else if (GET_CODE (pat) == PARALLEL)
2857 for (i = 0; i < XVECLEN (pat, 0); i++)
2859 rtx x = XVECEXP (pat, 0, i);
2861 if (GET_CODE (x) == SET)
2863 else if (GET_CODE (x) == CLOBBER)
2864 mark_clobber (x, insn);
2865 else if (GET_CODE (x) == CALL)
2869 else if (GET_CODE (pat) == CLOBBER)
2870 mark_clobber (pat, insn);
2871 else if (GET_CODE (pat) == CALL)
2876 /* Classic GCSE reaching definition support. */
2878 /* Allocate reaching def variables. */
2881 alloc_rd_mem (n_blocks, n_insns)
2882 int n_blocks, n_insns;
2884 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2885 sbitmap_vector_zero (rd_kill, n_basic_blocks);
2887 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2888 sbitmap_vector_zero (rd_gen, n_basic_blocks);
2890 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2891 sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2893 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2894 sbitmap_vector_zero (rd_out, n_basic_blocks);
2897 /* Free reaching def variables. */
2902 sbitmap_vector_free (rd_kill);
2903 sbitmap_vector_free (rd_gen);
2904 sbitmap_vector_free (reaching_defs);
2905 sbitmap_vector_free (rd_out);
2908 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2911 handle_rd_kill_set (insn, regno, bb)
2916 struct reg_set *this_reg;
2918 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2919 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2920 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2923 /* Compute the set of kills for reaching definitions. */
2933 For each set bit in `gen' of the block (i.e. each insn which
2934 generates a definition in the block)
2935 Call the reg set by the insn corresponding to that bit regx
2936 Look at the linked list starting at reg_set_table[regx]
2937 For each setting of regx in the linked list, which is not in
2938 this block
2939 Set the bit in `kill' corresponding to that insn. */
2940 for (bb = 0; bb < n_basic_blocks; bb++)
2941 for (cuid = 0; cuid < max_cuid; cuid++)
2942 if (TEST_BIT (rd_gen[bb], cuid))
2944 rtx insn = CUID_INSN (cuid);
2945 rtx pat = PATTERN (insn);
2947 if (GET_CODE (insn) == CALL_INSN)
2949 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2950 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2951 handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
2954 if (GET_CODE (pat) == PARALLEL)
2956 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2958 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2960 if ((code == SET || code == CLOBBER)
2961 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2962 handle_rd_kill_set (insn,
2963 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2967 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2968 /* Each setting of this register outside of this block
2969 must be marked in the set of kills in this block. */
2970 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
2974 /* Compute the reaching definitions as in
2975 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2976 Chapter 10. It is the same algorithm as used for computing available
2977 expressions but applied to the gens and kills of reaching definitions. */
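/* In dataflow terms the iteration below solves, to a fixed point
   (a sketch of the standard reaching-definitions equations):

      reaching_defs[bb] = union of rd_out[p] over all predecessors p
      rd_out[bb]        = rd_gen[bb] | (reaching_defs[bb] & ~rd_kill[bb])

   sbitmap_union_of_preds and sbitmap_union_of_diff compute exactly
   these two steps. */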
2982 int bb, changed, passes;
2984 for (bb = 0; bb < n_basic_blocks; bb++)
2985 sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
2992 for (bb = 0; bb < n_basic_blocks; bb++)
2994 sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
2995 changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
2996 reaching_defs[bb], rd_kill[bb]);
3002 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3005 /* Classic GCSE available expression support. */
3007 /* Allocate memory for available expression computation. */
3010 alloc_avail_expr_mem (n_blocks, n_exprs)
3011 int n_blocks, n_exprs;
3013 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3014 sbitmap_vector_zero (ae_kill, n_basic_blocks);
3016 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3017 sbitmap_vector_zero (ae_gen, n_basic_blocks);
3019 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3020 sbitmap_vector_zero (ae_in, n_basic_blocks);
3022 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3023 sbitmap_vector_zero (ae_out, n_basic_blocks);
3027 free_avail_expr_mem ()
3029 sbitmap_vector_free (ae_kill);
3030 sbitmap_vector_free (ae_gen);
3031 sbitmap_vector_free (ae_in);
3032 sbitmap_vector_free (ae_out);
3035 /* Compute the set of available expressions generated in each basic block. */
3044 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3045 This is all we have to do because an expression is not recorded if it
3046 is not available, and the only expressions we want to work with are the
3047 ones that are recorded. */
3048 for (i = 0; i < expr_hash_table_size; i++)
3049 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3050 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3051 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3054 /* Return non-zero if expression X is killed in BB. */
3057 expr_killed_p (x, bb)
3068 code = GET_CODE (x);
3072 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3075 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3078 return expr_killed_p (XEXP (x, 0), bb);
3095 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3099 /* If we are about to do the last recursive call
3100 needed at this level, change it into iteration.
3101 This function is called enough to be worth it. */
3103 return expr_killed_p (XEXP (x, i), bb);
3104 else if (expr_killed_p (XEXP (x, i), bb))
3107 else if (fmt[i] == 'E')
3108 for (j = 0; j < XVECLEN (x, i); j++)
3109 if (expr_killed_p (XVECEXP (x, i, j), bb))
3116 /* Compute the set of available expressions killed in each basic block. */
3119 compute_ae_kill (ae_gen, ae_kill)
3120 sbitmap *ae_gen, *ae_kill;
3126 for (bb = 0; bb < n_basic_blocks; bb++)
3127 for (i = 0; i < expr_hash_table_size; i++)
3128 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3130 /* Skip EXPR if generated in this block. */
3131 if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
3134 if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
3135 SET_BIT (ae_kill[bb], expr->bitmap_index);
3139 /* Actually perform the Classic GCSE optimizations. */
3141 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3143 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3144 as a positive reach. We want to do this when there are two computations
3145 of the expression in the block.
3147 VISITED is a pointer to a working buffer for tracking which BB's have
3148 been visited. It is NULL for the top-level call.
3150 We treat reaching expressions that go through blocks containing the same
3151 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3152 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3153 2 as not reaching. The intent is to improve the probability of finding
3154 only one reaching expression and to reduce register lifetimes by picking
3155 the closest such expression. */
3158 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3162 int check_self_loop;
3167 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3169 basic_block pred_bb = pred->src;
3171 if (visited[pred_bb->index])
3172 /* This predecessor has already been visited. Nothing to do. */
3174 else if (pred_bb == bb)
3176 /* BB loops on itself. */
3178 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3179 && BLOCK_NUM (occr->insn) == pred_bb->index)
3182 visited[pred_bb->index] = 1;
3185 /* Ignore this predecessor if it kills the expression. */
3186 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3187 visited[pred_bb->index] = 1;
3189 /* Does this predecessor generate this expression? */
3190 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3192 /* Is this the occurrence we're looking for?
3193 Note that there's only one generating occurrence per block
3194 so we just need to check the block number. */
3195 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3198 visited[pred_bb->index] = 1;
3201 /* Neither gen nor kill. */
3204 visited[pred_bb->index] = 1;
3205 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3212 /* All paths have been checked. */
3216 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3217 memory allocated for that function is returned. */
3220 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3224 int check_self_loop;
3227 char *visited = (char *) xcalloc (n_basic_blocks, 1);
3229 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3235 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3236 If there is more than one such instruction, return NULL.
3238 Called only by handle_avail_expr. */
3241 computing_insn (expr, insn)
3245 basic_block bb = BLOCK_FOR_INSN (insn);
3247 if (expr->avail_occr->next == NULL)
3249 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3250 /* The available expression is actually itself
3251 (i.e. a loop in the flow graph) so do nothing. */
3254 /* (FIXME) Case that we found a pattern that was created by
3255 a substitution that took place. */
3256 return expr->avail_occr->insn;
3260 /* Pattern is computed more than once.
3261 Search backwards from this insn to see how many of these
3262 computations actually reach this insn. */
3264 rtx insn_computes_expr = NULL;
3267 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3269 if (BLOCK_FOR_INSN (occr->insn) == bb)
3271 /* The expression is generated in this block.
3272 The only time we care about this is when the expression
3273 is generated later in the block [and thus there's a loop].
3274 We let the normal cse pass handle the other cases. */
3275 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3276 && expr_reaches_here_p (occr, expr, bb, 1))
3282 insn_computes_expr = occr->insn;
3285 else if (expr_reaches_here_p (occr, expr, bb, 0))
3291 insn_computes_expr = occr->insn;
3295 if (insn_computes_expr == NULL)
3298 return insn_computes_expr;
3302 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3303 Only called by can_disregard_other_sets. */
3306 def_reaches_here_p (insn, def_insn)
3311 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3314 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3316 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3318 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3320 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3321 reg = XEXP (PATTERN (def_insn), 0);
3322 else if (GET_CODE (PATTERN (def_insn)) == SET)
3323 reg = SET_DEST (PATTERN (def_insn));
3327 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3336 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
3337 value returned is the number of definitions that reach INSN. Returning a
3338 value of zero means that [maybe] more than one definition reaches INSN and
3339 the caller can't perform whatever optimization it is trying; i.e. it is
3340 always safe to return zero. */
3343 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3344 struct reg_set **addr_this_reg;
3348 int number_of_reaching_defs = 0;
3349 struct reg_set *this_reg;
3351 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3352 if (def_reaches_here_p (insn, this_reg->insn))
3354 number_of_reaching_defs++;
3355 /* Ignore parallels for now. */
3356 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3360 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3361 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3362 SET_SRC (PATTERN (insn)))))
3363 /* A setting of the reg to a different value reaches INSN. */
3366 if (number_of_reaching_defs > 1)
3368 /* If in this setting the value the register is being set to is
3369 equal to the previous value the register was set to, and this
3370 setting reaches the insn we are trying to do the substitution
3371 on, then we are ok. */
3372 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3374 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3375 SET_SRC (PATTERN (insn))))
3379 *addr_this_reg = this_reg;
3382 return number_of_reaching_defs;
3385 /* Expression computed by insn is available and the substitution is legal,
3386 so try to perform the substitution.
3388 The result is non-zero if any changes were made. */
3391 handle_avail_expr (insn, expr)
3395 rtx pat, insn_computes_expr, expr_set;
3397 struct reg_set *this_reg;
3398 int found_setting, use_src;
3401 /* We only handle the case where one computation of the expression
3402 reaches this instruction. */
3403 insn_computes_expr = computing_insn (expr, insn);
3404 if (insn_computes_expr == NULL)
3406 expr_set = single_set (insn_computes_expr);
3413 /* At this point we know only one computation of EXPR outside of this
3414 block reaches this insn. Now try to find a register that the
3415 expression is computed into. */
3416 if (GET_CODE (SET_SRC (expr_set)) == REG)
3418 /* This is the case when the available expression that reaches
3419 here has already been handled as an available expression. */
3420 unsigned int regnum_for_replacing
3421 = REGNO (SET_SRC (expr_set));
3423 /* If the register was created by GCSE we can't use `reg_set_table',
3424 however we know it's set only once. */
3425 if (regnum_for_replacing >= max_gcse_regno
3426 /* If the register the expression is computed into is set only once,
3427 or only one set reaches this insn, we can use it. */
3428 || (((this_reg = reg_set_table[regnum_for_replacing]),
3429 this_reg->next == NULL)
3430 || can_disregard_other_sets (&this_reg, insn, 0)))
3439 unsigned int regnum_for_replacing
3440 = REGNO (SET_DEST (expr_set));
3442 /* This shouldn't happen. */
3443 if (regnum_for_replacing >= max_gcse_regno)
3446 this_reg = reg_set_table[regnum_for_replacing];
3448 /* If the register the expression is computed into is set only once,
3449 or only one set reaches this insn, use it. */
3450 if (this_reg->next == NULL
3451 || can_disregard_other_sets (&this_reg, insn, 0))
3457 pat = PATTERN (insn);
3459 to = SET_SRC (expr_set);
3461 to = SET_DEST (expr_set);
3462 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3464 /* We should be able to ignore the return code from validate_change but
3465 to play it safe we check. */
3469 if (gcse_file != NULL)
3471 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3473 fprintf (gcse_file, " reg %d %s insn %d\n",
3474 REGNO (to), use_src ? "from" : "set in",
3475 INSN_UID (insn_computes_expr));
3480 /* The register that the expr is computed into is set more than once. */
3481 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3483 /* Insert an insn after insnx that copies the reg set in insnx
3484 into a new pseudo register; call this new register REGN.
3485 From insnb until the end of the basic block, or until REGB is set,
3486 replace all uses of REGB with REGN. */
3489 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3491 /* Generate the new insn. */
3492 /* ??? If the change fails, we return 0, even though we created
3493 an insn. I think this is ok. */
3495 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3496 SET_DEST (expr_set)),
3497 insn_computes_expr);
3499 /* Keep block number table up to date. */
3500 set_block_for_new_insns (new_insn, BLOCK_FOR_INSN (insn_computes_expr));
3502 /* Keep register set table up to date. */
3503 record_one_set (REGNO (to), new_insn);
3505 gcse_create_count++;
3506 if (gcse_file != NULL)
3508 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3509 INSN_UID (NEXT_INSN (insn_computes_expr)),
3510 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3511 fprintf (gcse_file, ", computed in insn %d,\n",
3512 INSN_UID (insn_computes_expr));
3513 fprintf (gcse_file, " into newly allocated reg %d\n",
3517 pat = PATTERN (insn);
3519 /* Do register replacement for INSN. */
3520 changed = validate_change (insn, &SET_SRC (pat),
3522 (NEXT_INSN (insn_computes_expr))),
3525 /* We should be able to ignore the return code from validate_change but
3526 to play it safe we check. */
3530 if (gcse_file != NULL)
3533 "GCSE: Replacing the source in insn %d with reg %d ",
3535 REGNO (SET_DEST (PATTERN (NEXT_INSN
3536 (insn_computes_expr)))));
3537 fprintf (gcse_file, "set in insn %d\n",
3538 INSN_UID (insn_computes_expr));
3546 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3547 the dataflow analysis has been done.
3549 The result is non-zero if a change was made. */
3557 /* Note we start at block 1. */
3560 for (bb = 1; bb < n_basic_blocks; bb++)
3562 /* Reset tables used to keep track of what's still valid [since the
3563 start of the block]. */
3564 reset_opr_set_tables ();
3566 for (insn = BLOCK_HEAD (bb);
3567 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3568 insn = NEXT_INSN (insn))
3570 /* Is insn of form (set (pseudo-reg) ...)? */
3571 if (GET_CODE (insn) == INSN
3572 && GET_CODE (PATTERN (insn)) == SET
3573 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3574 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3576 rtx pat = PATTERN (insn);
3577 rtx src = SET_SRC (pat);
3580 if (want_to_gcse_p (src)
3581 /* Is the expression recorded? */
3582 && ((expr = lookup_expr (src)) != NULL)
3583 /* Is the expression available [at the start of the block]? */
3585 && TEST_BIT (ae_in[bb], expr->bitmap_index)
3586 /* Are the operands unchanged since the start of the block? */
3588 && oprs_not_set_p (src, insn))
3589 changed |= handle_avail_expr (insn, expr);
3592 /* Keep track of everything modified by this insn. */
3593 /* ??? Need to be careful w.r.t. mods done to INSN. */
3595 mark_oprs_set (insn);
3602 /* Top level routine to perform one classic GCSE pass.
3604 Return non-zero if a change was made. */
3607 one_classic_gcse_pass (pass)
3612 gcse_subst_count = 0;
3613 gcse_create_count = 0;
3615 alloc_expr_hash_table (max_cuid);
3616 alloc_rd_mem (n_basic_blocks, max_cuid);
3617 compute_expr_hash_table ();
3619 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3620 expr_hash_table_size, n_exprs);
3626 alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3628 compute_ae_kill (ae_gen, ae_kill);
3629 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3630 changed = classic_gcse ();
3631 free_avail_expr_mem ();
3635 free_expr_hash_table ();
3639 fprintf (gcse_file, "\n");
3640 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3641 current_function_name, pass, bytes_used, gcse_subst_count);
3642 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3648 /* Compute copy/constant propagation working variables. */
3650 /* Local properties of assignments. */
3651 static sbitmap *cprop_pavloc;
3652 static sbitmap *cprop_absaltered;
3654 /* Global properties of assignments (computed from the local properties). */
3655 static sbitmap *cprop_avin;
3656 static sbitmap *cprop_avout;
3658 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3659 basic blocks. N_SETS is the number of sets. */
3662 alloc_cprop_mem (n_blocks, n_sets)
3663 int n_blocks, n_sets;
3665 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3666 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3668 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3669 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3672 /* Free vars used by copy/const propagation. */
3677 sbitmap_vector_free (cprop_pavloc);
3678 sbitmap_vector_free (cprop_absaltered);
3679 sbitmap_vector_free (cprop_avin);
3680 sbitmap_vector_free (cprop_avout);
3683 /* For each block, compute whether X is transparent. X is either an
3684 expression or an assignment [though we don't care which, for this context
3685 an assignment is treated as an expression]. For each block where an
3686 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX bit in BMAP. */
3690 compute_transp (x, indx, bmap, set_p)
3701 /* repeat is used to turn tail-recursion into iteration since GCC
3702 can't do it when there's no return value. */
3708 code = GET_CODE (x);
3714 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3716 for (bb = 0; bb < n_basic_blocks; bb++)
3717 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3718 SET_BIT (bmap[bb], indx);
3722 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3723 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3728 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3730 for (bb = 0; bb < n_basic_blocks; bb++)
3731 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3732 RESET_BIT (bmap[bb], indx);
3736 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3737 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3744 for (bb = 0; bb < n_basic_blocks; bb++)
3746 rtx list_entry = canon_modify_mem_list[bb];
3750 rtx dest, dest_addr;
3752 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3755 SET_BIT (bmap[bb], indx);
3757 RESET_BIT (bmap[bb], indx);
3760 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3761 Examine each hunk of memory that is modified. */
3763 dest = XEXP (list_entry, 0);
3764 list_entry = XEXP (list_entry, 1);
3765 dest_addr = XEXP (list_entry, 0);
3767 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3768 x, rtx_addr_varies_p))
3771 SET_BIT (bmap[bb], indx);
3773 RESET_BIT (bmap[bb], indx);
3776 list_entry = XEXP (list_entry, 1);
3798 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3802 /* If we are about to do the last recursive call
3803 needed at this level, change it into iteration.
3804 This function is called enough to be worth it. */
3811 compute_transp (XEXP (x, i), indx, bmap, set_p);
3813 else if (fmt[i] == 'E')
3814 for (j = 0; j < XVECLEN (x, i); j++)
3815 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
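/* As an illustration: for the expression (plus:SI (reg 105) (reg 106)),
   the loop above visits both REGs, so the INDX bit is set (SET_P == 1)
   or reset (SET_P == 0) in every block that modifies reg 105 or
   reg 106; had the expression contained a MEM, blocks with possibly
   aliasing stores would be included via canon_true_dependence. */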
3819 /* Top level routine to do the dataflow analysis needed by copy/const propagation. */
3823 compute_cprop_data ()
3825 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3826 compute_available (cprop_pavloc, cprop_absaltered,
3827 cprop_avout, cprop_avin);
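/* The underlying system solved by compute_available is (sketched):

      cprop_avin[bb]  = intersection of cprop_avout[p] over preds p
      cprop_avout[bb] = cprop_pavloc[bb]
			| (cprop_avin[bb] & ~cprop_absaltered[bb])

   with cprop_avin empty for the entry block. */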
3830 /* Copy/constant propagation. */
3832 /* Maximum number of register uses in an insn that we handle. */
3835 /* Table of uses found in an insn.
3836 Allocated statically to avoid alloc/free complexity and overhead. */
3837 static struct reg_use reg_use_table[MAX_USES];
3839 /* Index into `reg_use_table' while building it. */
3840 static int reg_use_count;
3842 /* Set up a list of register numbers used in INSN. The found uses are stored
3843 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3844 and contains the number of uses in the table upon exit.
3846 ??? If a register appears multiple times we will record it multiple times.
3847 This doesn't hurt anything but it will slow things down. */
3850 find_used_regs (xptr, data)
3852 void *data ATTRIBUTE_UNUSED;
3859 /* repeat is used to turn tail-recursion into iteration since GCC
3860 can't do it when there's no return value. */
3865 code = GET_CODE (x);
3868 if (reg_use_count == MAX_USES)
3871 reg_use_table[reg_use_count].reg_rtx = x;
3875 /* Recursively scan the operands of this expression. */
3877 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3881 /* If we are about to do the last recursive call
3882 needed at this level, change it into iteration.
3883 This function is called enough to be worth it. */
3890 find_used_regs (&XEXP (x, i), data);
3892 else if (fmt[i] == 'E')
3893 for (j = 0; j < XVECLEN (x, i); j++)
3894 find_used_regs (&XVECEXP (x, i, j), data);
3898 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3899 Returns non-zero if successful. */
3902 try_replace_reg (from, to, insn)
3905 rtx note = find_reg_equal_equiv_note (insn);
3908 rtx set = single_set (insn);
3910 success = validate_replace_src (from, to, insn);
3912 /* If above failed and this is a single set, try to simplify the source of
3913 the set given our substitution. We could perhaps try this for multiple
3914 SETs, but it probably won't buy us anything. */
3915 if (!success && set != 0)
3917 src = simplify_replace_rtx (SET_SRC (set), from, to);
3919 if (!rtx_equal_p (src, SET_SRC (set))
3920 && validate_change (insn, &SET_SRC (set), src, 0))
3924 /* If we've failed to do replacement, have a single SET, and don't already
3925 have a note, add a REG_EQUAL note to not lose information. */
3926 if (!success && note == 0 && set != 0)
3927 note = REG_NOTES (insn)
3928 = gen_rtx_EXPR_LIST (REG_EQUAL, src, REG_NOTES (insn));
3930 /* If there is already a NOTE, update the expression in it with our replacement. */
3933 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3935 /* REG_EQUAL may get simplified into a register.
3936 We don't allow that. Remove that note. This code ought
3937 not to happen, because previous code ought to synthesize a
3938 reg-reg move, but be on the safe side. */
3939 if (note && REG_P (XEXP (note, 0)))
3940 remove_note (insn, note);
3945 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
3946 NULL if no such set is found. */
3948 static struct expr *
3949 find_avail_set (regno, insn)
3953 /* SET1 contains the last set found that can be returned to the caller for
3954 use in a substitution. */
3955 struct expr *set1 = 0;
3957 /* Loops are not possible here. To get a loop we would need two sets
3958 available at the start of the block containing INSN, i.e. we would
3959 need two sets like this available at the start of the block:
3961 (set (reg X) (reg Y))
3962 (set (reg Y) (reg X))
3964 This cannot happen since the set of (reg Y) would have killed the
3965 set of (reg X) making it unavailable at the start of this block. */
3969 struct expr *set = lookup_set (regno, NULL_RTX);
3971 /* Find a set that is available at the start of the block
3972 which contains INSN. */
3975 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3977 set = next_set (regno, set);
3980 /* If no available set was found we've reached the end of the
3981 (possibly empty) copy chain. */
3985 if (GET_CODE (set->expr) != SET)
3988 src = SET_SRC (set->expr);
3990 /* We know the set is available.
3991 Now check that SRC is ANTLOC (i.e. none of the source operands
3992 have changed since the start of the block).
3994 If the source operand changed, we may still use it for the next
3995 iteration of this loop, but we may not use it for substitutions. */
3997 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4000 /* If the source of the set is anything except a register, then
4001 we have reached the end of the copy chain. */
4002 if (GET_CODE (src) != REG)
4005 /* Follow the copy chain, i.e. start another iteration of the loop
4006 and see if we have an available copy into SRC. */
4007 regno = REGNO (src);
4010 /* SET1 holds the last set that was available and anticipatable at INSN. */
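/* E.g. (an illustrative chain): with both

      (set (reg Y) (reg Z))
      (set (reg X) (reg Y))

   available on entry to INSN's block, a lookup for X first finds the
   copy from Y and then follows the chain to the copy from Z, so X can
   be replaced by Z as long as each source is still unchanged at
   INSN. */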
4015 /* Subroutine of cprop_insn that tries to propagate constants into
4016 JUMP_INSNS. INSN must be a conditional jump. FROM is what we will try to
4017 replace, SRC is the constant we will try to substitute for it. Returns
4018 nonzero if a change was made. We know INSN has just a SET. */
4021 cprop_jump (bb, insn, from, src)
4027 rtx set = PATTERN (insn);
4028 rtx new = simplify_replace_rtx (SET_SRC (set), from, src);
4030 /* If no simplification can be made, then try the next register. */
4032 if (rtx_equal_p (new, SET_SRC (set)))
4035 /* If this is now a no-op leave it that way, but update LABEL_NUSES if necessary. */
4039 SET_SRC (set) = new;
4041 if (JUMP_LABEL (insn) != 0)
4042 --LABEL_NUSES (JUMP_LABEL (insn));
4045 /* Otherwise, this must be a valid instruction. */
4046 else if (! validate_change (insn, &SET_SRC (set), new, 0))
4049 /* If this has turned into an unconditional jump,
4050 then put a barrier after it so that the unreachable
4051 code will be deleted. */
4052 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4053 emit_barrier_after (insn);
4055 run_jump_opt_after_gcse = 1;
4058 if (gcse_file != NULL)
4061 "CONST-PROP: Replacing reg %d in insn %d with constant ",
4062 REGNO (from), INSN_UID (insn));
4063 print_rtl (gcse_file, src);
4064 fprintf (gcse_file, "\n");
4066 purge_dead_edges (bb);
4073 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
4074 for machines that have CC0. INSN is a single set that stores into CC0;
4075 the insn following it is a conditional jump. REG_USED is the use we will
4076 try to replace, SRC is the constant we will try to substitute for it.
4077 Returns nonzero if a change was made. */
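/* E.g. (a sketch for a CC0 target):

      (set (cc0) (compare (reg 100) (const_int 0)))
      (set (pc) (if_then_else (eq (cc0) (const_int 0)) ...))

   With a known constant for reg 100, the comparison is substituted
   for CC0 in the jump, cprop_jump tries to simplify the result, and
   on success the now-dead CC0 setter is turned into a NOTE. */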
4080 cprop_cc0_jump (bb, insn, reg_used, src)
4083 struct reg_use *reg_used;
4086 /* First substitute in the SET_SRC of INSN, then substitute that for CC0 in JUMP. */
4088 rtx jump = NEXT_INSN (insn);
4089 rtx new_src = simplify_replace_rtx (SET_SRC (PATTERN (insn)),
4090 reg_used->reg_rtx, src);
4092 if (! cprop_jump (bb, jump, cc0_rtx, new_src))
4095 /* If we succeeded, delete the cc0 setter. */
4096 PUT_CODE (insn, NOTE);
4097 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4098 NOTE_SOURCE_FILE (insn) = 0;
4104 /* Perform constant and copy propagation on INSN.
4105 The result is non-zero if a change was made. */
4108 cprop_insn (bb, insn, alter_jumps)
4113 struct reg_use *reg_used;
4121 note_uses (&PATTERN (insn), find_used_regs, NULL);
4123 note = find_reg_equal_equiv_note (insn);
4125 /* We may win even when propagating constants into notes. */
4127 find_used_regs (&XEXP (note, 0), NULL);
4129 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4130 reg_used++, reg_use_count--)
4132 unsigned int regno = REGNO (reg_used->reg_rtx);
4136 /* Ignore registers created by GCSE.
4137 We do this because ... */
4138 if (regno >= max_gcse_regno)
4141 /* If the register has already been set in this block, there's
4142 nothing we can do. */
4143 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4146 /* Find an assignment that sets reg_used and is available
4147 at the start of the block. */
4148 set = find_avail_set (regno, insn);
4153 /* ??? We might be able to handle PARALLELs. Later. */
4154 if (GET_CODE (pat) != SET)
4157 src = SET_SRC (pat);
4159 /* Constant propagation. */
4160 if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE
4161 || GET_CODE (src) == SYMBOL_REF)
4163 /* Handle normal insns first. */
4164 if (GET_CODE (insn) == INSN
4165 && try_replace_reg (reg_used->reg_rtx, src, insn))
4169 if (gcse_file != NULL)
4171 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4173 fprintf (gcse_file, "insn %d with constant ",
4175 print_rtl (gcse_file, src);
4176 fprintf (gcse_file, "\n");
4179 /* The original insn setting reg_used may or may not now be
4180 deletable. We leave the deletion to flow. */
4183 /* Try to propagate a CONST_INT into a conditional jump.
4184 We're pretty specific about what we will handle in this
4185 code; we can extend this as necessary over time.
4187 Right now the insn in question must look like
4188 (set (pc) (if_then_else ...)) */
4189 else if (alter_jumps
4190 && GET_CODE (insn) == JUMP_INSN
4191 && condjump_p (insn)
4192 && ! simplejump_p (insn))
4193 changed |= cprop_jump (bb, insn, reg_used->reg_rtx, src);
4196 /* Similar code for machines that use a pair of CC0 setter and
4197 conditional jump insn. */
4198 else if (alter_jumps
4199 && GET_CODE (PATTERN (insn)) == SET
4200 && SET_DEST (PATTERN (insn)) == cc0_rtx
4201 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4202 && condjump_p (NEXT_INSN (insn))
4203 && ! simplejump_p (NEXT_INSN (insn))
4204 && cprop_cc0_jump (bb, insn, reg_used, src))
4211 else if (GET_CODE (src) == REG
4212 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4213 && REGNO (src) != regno)
4215 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4219 if (gcse_file != NULL)
4221 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4222 regno, INSN_UID (insn));
4223 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4226 /* The original insn setting reg_used may or may not now be
4227 deletable. We leave the deletion to flow. */
4228 /* FIXME: If it turns out that the insn isn't deletable,
4229 then we may have unnecessarily extended register lifetimes
4230 and made things worse. */
4238 /* Forward propagate copies. This includes copies and constants. Return
4239 non-zero if a change was made. */
4248 /* Note we start at block 1. */
4251 for (bb = 1; bb < n_basic_blocks; bb++)
4253 /* Reset tables used to keep track of what's still valid [since the
4254 start of the block]. */
4255 reset_opr_set_tables ();
4257 for (insn = BLOCK_HEAD (bb);
4258 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
4259 insn = NEXT_INSN (insn))
4262 changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
4264 /* Keep track of everything modified by this insn. */
4265 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4266 call mark_oprs_set if we turned the insn into a NOTE. */
4267 if (GET_CODE (insn) != NOTE)
4268 mark_oprs_set (insn);
4272 if (gcse_file != NULL)
4273 fprintf (gcse_file, "\n");
4278 /* Perform one copy/constant propagation pass.
4279 F is the first insn in the function.
4280 PASS is the pass count. */
4283 one_cprop_pass (pass, alter_jumps)
4289 const_prop_count = 0;
4290 copy_prop_count = 0;
4292 alloc_set_hash_table (max_cuid);
4293 compute_set_hash_table ();
4295 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4299 alloc_cprop_mem (n_basic_blocks, n_sets);
4300 compute_cprop_data ();
4301 changed = cprop (alter_jumps);
4305 free_set_hash_table ();
4309 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4310 current_function_name, pass, bytes_used);
4311 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4312 const_prop_count, copy_prop_count);
4318 /* Compute PRE+LCM working variables. */
4320 /* Local properties of expressions. */
4321 /* Nonzero for expressions that are transparent in the block. */
4322 static sbitmap *transp;
4324 /* Nonzero for expressions that are transparent at the end of the block.
4325 This is only zero for expressions killed by an abnormal critical edge
4326 created by a call. */
4327 static sbitmap *transpout;
4329 /* Nonzero for expressions that are computed (available) in the block. */
4330 static sbitmap *comp;
4332 /* Nonzero for expressions that are locally anticipatable in the block. */
4333 static sbitmap *antloc;
4335 /* Nonzero for expressions where this block is an optimal computation point. */
4337 static sbitmap *pre_optimal;
4339 /* Nonzero for expressions which are redundant in a particular block. */
4340 static sbitmap *pre_redundant;
4342 /* Nonzero for expressions which should be inserted on a specific edge. */
4343 static sbitmap *pre_insert_map;
4345 /* Nonzero for expressions which should be deleted in a specific block. */
4346 static sbitmap *pre_delete_map;
4348 /* Contains the edge_list returned by pre_edge_lcm. */
4349 static struct edge_list *edge_list;
4351 /* Redundant insns. */
4352 static sbitmap pre_redundant_insns;
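/* In outline: pre_edge_lcm consumes TRANSP, COMP, ANTLOC and AE_KILL,
   and hands back PRE_INSERT_MAP (indexed by edge) and PRE_DELETE_MAP
   (indexed by block), which drive the later insertion and deletion
   phases of PRE. */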
4354 /* Allocate vars used for PRE analysis. */
4357 alloc_pre_mem (n_blocks, n_exprs)
4358 int n_blocks, n_exprs;
4360 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4361 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4362 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4365 pre_redundant = NULL;
4366 pre_insert_map = NULL;
4367 pre_delete_map = NULL;
4370 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4372 /* pre_insert and pre_delete are allocated later. */
4375 /* Free vars used for PRE analysis. */
4380 sbitmap_vector_free (transp);
4381 sbitmap_vector_free (comp);
4383 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4386 sbitmap_vector_free (pre_optimal);
4388 sbitmap_vector_free (pre_redundant);
4390 sbitmap_vector_free (pre_insert_map);
4392 sbitmap_vector_free (pre_delete_map);
4394 sbitmap_vector_free (ae_in);
4396 sbitmap_vector_free (ae_out);
4398 transp = comp = NULL;
4399 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4400 ae_in = ae_out = NULL;
4403 /* Top level routine to do the dataflow analysis needed by PRE. */
4408 sbitmap trapping_expr;
4412 compute_local_properties (transp, comp, antloc, 0);
4413 sbitmap_vector_zero (ae_kill, n_basic_blocks);
4415 /* Collect expressions which might trap. */
4416 trapping_expr = sbitmap_alloc (n_exprs);
4417 sbitmap_zero (trapping_expr);
4418 for (ui = 0; ui < expr_hash_table_size; ui++)
4421 for (e = expr_hash_table[ui]; e != NULL; e = e->next_same_hash)
4422 if (may_trap_p (e->expr))
4423 SET_BIT (trapping_expr, e->bitmap_index);
4426 /* Compute ae_kill for each basic block using:

4428 ~(TRANSP | COMP)

4430 This is significantly faster than compute_ae_kill. */
4432 for (i = 0; i < n_basic_blocks; i++)
4436 /* If the current block is the destination of an abnormal edge, we
4437 kill all trapping expressions because we won't be able to properly
4438 place the instruction on the edge. So make them neither
4439 anticipatable nor transparent. This is fairly conservative. */
4440 for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
4441 if (e->flags & EDGE_ABNORMAL)
4443 sbitmap_difference (antloc[i], antloc[i], trapping_expr);
4444 sbitmap_difference (transp[i], transp[i], trapping_expr);
4448 sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
4449 sbitmap_not (ae_kill[i], ae_kill[i]);
4452 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4453 ae_kill, &pre_insert_map, &pre_delete_map);
4454 sbitmap_vector_free (antloc);
4456 sbitmap_vector_free (ae_kill);
4458 free (trapping_expr);
4463 /* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
4466 VISITED is a pointer to a working buffer for tracking which BB's have
4467 been visited. It is NULL for the top-level call.
4469 We treat reaching expressions that go through blocks containing the same
4470 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4471 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4472 2 as not reaching. The intent is to improve the probability of finding
4473 only one reaching expression and to reduce register lifetimes by picking
4474 the closest such expression. */
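/* A sketch of the example above (block numbers are hypothetical):

       [bb 2]  x = a + b     <- treated as NOT reaching bb 4
          |
       [bb 3]  x = a + b     <- the closest occurrence; it reaches
          |
       [bb 4]  insn using a + b

   Only the occurrence in block 3 is considered to reach the insn.  */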
4477 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4478 basic_block occr_bb;
4485 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
4487 basic_block pred_bb = pred->src;
4489 if (pred->src == ENTRY_BLOCK_PTR
4490 /* Has this predecessor already been visited? */
4491 || visited[pred_bb->index])
4492 ;/* Nothing to do. */
4494 /* Does this predecessor generate this expression? */
4495 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
4497 /* Is this the occurrence we're looking for?
4498 Note that there's only one generating occurrence per block
4499 so we just need to check the block number. */
4500 if (occr_bb == pred_bb)
4503 visited[pred_bb->index] = 1;
4505 /* Ignore this predecessor if it kills the expression. */
4506 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
4507 visited[pred_bb->index] = 1;
4509 /* Neither gen nor kill. */
4512 visited[pred_bb->index] = 1;
4513 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4518 /* All paths have been checked. */
4522 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
4523 memory allocated for that function is returned. */
4526 pre_expr_reaches_here_p (occr_bb, expr, bb)
4527 basic_block occr_bb;
4532 char *visited = (char *) xcalloc (n_basic_blocks, 1);
4534 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
4541 /* Given an expr, generate RTL which we can insert at the end of a BB,
4542 or on an edge. Set the block number of any insns generated to
4543 the value of BB. */
4546 process_insert_insn (expr)
4549 rtx reg = expr->reaching_reg;
4550 rtx exp = copy_rtx (expr->expr);
4555 /* If the expression is something that's an operand, like a constant,
4556 just copy it to a register. */
4557 if (general_operand (exp, GET_MODE (reg)))
4558 emit_move_insn (reg, exp);
4560 /* Otherwise, make a new insn to compute this expression and make sure the
4561 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4562 expression to make sure we don't have any sharing issues. */
4563 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
4566 pat = gen_sequence ();
4572 /* Add EXPR to the end of basic block BB.
4574 This is used by both PRE and code hoisting.
4576 For PRE, we want to verify that the expr is either transparent
4577 or locally anticipatable in the target block. This check makes
4578 no sense for code hoisting. */
4581 insert_insn_end_bb (expr, bb, pre)
4588 rtx reg = expr->reaching_reg;
4589 int regno = REGNO (reg);
4593 pat = process_insert_insn (expr);
4595 /* If the last insn is a jump, insert EXPR in front [taking care to
4596 handle cc0, etc. properly]. */
4598 if (GET_CODE (insn) == JUMP_INSN)
4604 /* If this is a jump table, then we can't insert stuff here. Since
4605 we know the previous real insn must be the tablejump, we insert
4606 the new instruction just before the tablejump. */
4607 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4608 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4609 insn = prev_real_insn (insn);
4612 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4613 if cc0 isn't set. */
4614 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4616 insn = XEXP (note, 0);
4619 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4620 if (maybe_cc0_setter
4621 && INSN_P (maybe_cc0_setter)
4622 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4623 insn = maybe_cc0_setter;
4626 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4627 new_insn = emit_block_insn_before (pat, insn, bb);
4630 /* Likewise if the last insn is a call, as will happen in the presence
4631 of exception handling. */
4632 else if (GET_CODE (insn) == CALL_INSN)
4634 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4635 we search backward and place the instructions before the first
4636 parameter is loaded. Do this for everyone for consistency and a
4637 presumption that we'll get better code elsewhere as well.
4639 It should always be the case that we can put these instructions
4640 anywhere in the basic block when performing PRE optimizations.
4644 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4645 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4648 /* Since different machines initialize their parameter registers
4649 in different orders, assume nothing. Collect the set of all
4650 parameter registers. */
4651 insn = find_first_parameter_load (insn, bb->head);
4653 /* If we found all the parameter loads, then we want to insert
4654 before the first parameter load.
4656 If we did not find all the parameter loads, then we might have
4657 stopped on the head of the block, which could be a CODE_LABEL.
4658 If we inserted before the CODE_LABEL, then we would be putting
4659 the insn in the wrong basic block. In that case, put the insn
4660 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4661 while (GET_CODE (insn) == CODE_LABEL
4662 || NOTE_INSN_BASIC_BLOCK_P (insn))
4663 insn = NEXT_INSN (insn);
4665 new_insn = emit_block_insn_before (pat, insn, bb);
4669 new_insn = emit_insn_after (pat, insn);
4673 /* Keep block number table up to date.
4674 Note, PAT could be a multiple insn sequence, we have to make
4675 sure that each insn in the sequence is handled. */
4676 if (GET_CODE (pat) == SEQUENCE)
4678 for (i = 0; i < XVECLEN (pat, 0); i++)
4680 rtx insn = XVECEXP (pat, 0, i);
4682 set_block_for_insn (insn, bb);
4684 add_label_notes (PATTERN (insn), new_insn);
4686 note_stores (PATTERN (insn), record_set_info, insn);
4691 add_label_notes (SET_SRC (pat), new_insn);
4692 set_block_for_new_insns (new_insn, bb);
4694 /* Keep register set table up to date. */
4695 record_one_set (regno, new_insn);
4698 gcse_create_count++;
4702 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4703 bb->index, INSN_UID (new_insn));
4704 fprintf (gcse_file, "copying expression %d to reg %d\n",
4705 expr->bitmap_index, regno);
4709 /* Insert partially redundant expressions on edges in the CFG to make
4710 the expressions fully redundant. */
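/* For illustration (a source-level sketch, not from the sources above):

       if (cond)
         x = a + b;     -- computed on this path only
       ...
       y = a + b;       -- partially redundant

   Inserting a + b on the edge along which it was missing makes the second
   computation fully redundant, so it can later be replaced by a copy from
   the reaching register.  */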
4713 pre_edge_insert (edge_list, index_map)
4714 struct edge_list *edge_list;
4715 struct expr **index_map;
4717 int e, i, j, num_edges, set_size, did_insert = 0;
4720 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4721 if it reaches any of the deleted expressions. */
4723 set_size = pre_insert_map[0]->size;
4724 num_edges = NUM_EDGES (edge_list);
4725 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4726 sbitmap_vector_zero (inserted, num_edges);
4728 for (e = 0; e < num_edges; e++)
4731 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4733 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4735 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4737 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4738 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4740 struct expr *expr = index_map[j];
4743 /* Now look at each deleted occurrence of this expression. */
4744 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4746 if (! occr->deleted_p)
4749 /* Insert this expression on this edge if it would
4750 reach the deleted occurrence in BB. */
4751 if (!TEST_BIT (inserted[e], j))
4754 edge eg = INDEX_EDGE (edge_list, e);
4756 /* We can't insert anything on an abnormal and
4757 critical edge, so we insert the insn at the end of
4758 the previous block. There are several alternatives
4759 detailed in Morgan's book, p. 277 (sec. 10.5), for
4760 handling this situation. This one is easiest for
4763 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4764 insert_insn_end_bb (index_map[j], bb, 0);
4767 insn = process_insert_insn (index_map[j]);
4768 insert_insn_on_edge (insn, eg);
4773 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4775 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4776 fprintf (gcse_file, "copy expression %d\n",
4777 expr->bitmap_index);
4780 update_ld_motion_stores (expr);
4781 SET_BIT (inserted[e], j);
4783 gcse_create_count++;
4790 sbitmap_vector_free (inserted);
4794 /* Copy the result of INSN to REG. INDX is the expression number. */
4797 pre_insert_copy_insn (expr, insn)
4801 rtx reg = expr->reaching_reg;
4802 int regno = REGNO (reg);
4803 int indx = expr->bitmap_index;
4804 rtx set = single_set (insn);
4806 basic_block bb = BLOCK_FOR_INSN (insn);
4811 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
4813 /* Keep block number table up to date. */
4814 set_block_for_new_insns (new_insn, bb);
4816 /* Keep register set table up to date. */
4817 record_one_set (regno, new_insn);
4818 if (insn == bb->end)
4821 gcse_create_count++;
4825 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4826 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4827 INSN_UID (insn), regno);
4828 update_ld_motion_stores (expr);
4831 /* Copy available expressions that reach the redundant expression
4832 to `reaching_reg'. */
4835 pre_insert_copies ()
4842 /* For each available expression in the table, copy the result to
4843 `reaching_reg' if the expression reaches a deleted one.
4845 ??? The current algorithm is rather brute force.
4846 Need to do some profiling. */
4848 for (i = 0; i < expr_hash_table_size; i++)
4849 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4851 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4852 we don't want to insert a copy here because the expression may not
4853 really be redundant. So only insert an insn if the expression was
4854 deleted. This test also avoids further processing if the
4855 expression wasn't deleted anywhere. */
4856 if (expr->reaching_reg == NULL)
4859 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4861 if (! occr->deleted_p)
4864 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4866 rtx insn = avail->insn;
4868 /* No need to handle this one if handled already. */
4869 if (avail->copied_p)
4872 /* Don't handle this one if it's a redundant one. */
4873 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4876 /* Or if the expression doesn't reach the deleted one. */
4877 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4879 BLOCK_FOR_INSN (occr->insn)))
4882 /* Copy the result of avail to reaching_reg. */
4883 pre_insert_copy_insn (expr, insn);
4884 avail->copied_p = 1;
4890 /* Delete redundant computations.
4891 Deletion is done by changing the insn to copy the `reaching_reg' of
4892 the expression into the result of the SET. It is left to later passes
4893 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4895 Returns non-zero if a change is made. */
4906 for (i = 0; i < expr_hash_table_size; i++)
4907 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4909 int indx = expr->bitmap_index;
4911 /* We only need to search antic_occr since we require
4912 ANTLOC != 0. */
4914 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4916 rtx insn = occr->insn;
4918 basic_block bb = BLOCK_FOR_INSN (insn);
4920 if (TEST_BIT (pre_delete_map[bb->index], indx))
4922 set = single_set (insn);
4926 /* Create a pseudo-reg to store the result of reaching
4927 expressions into. Get the mode for the new pseudo from
4928 the mode of the original destination pseudo. */
4929 if (expr->reaching_reg == NULL)
4931 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4933 /* In theory this should never fail since we're creating
4934 a reg->reg copy.
4936 However, on the x86 some of the movXX patterns actually
4937 contain clobbers of scratch regs. This may cause the
4938 insn created by validate_change to not match any pattern
4939 and thus cause validate_change to fail. */
4940 if (validate_change (insn, &SET_SRC (set),
4941 expr->reaching_reg, 0))
4943 occr->deleted_p = 1;
4944 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4952 "PRE: redundant insn %d (expression %d) in ",
4953 INSN_UID (insn), indx);
4954 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4955 bb->index, REGNO (expr->reaching_reg));
4964 /* Perform GCSE optimizations using PRE.
4965 This is called by one_pre_gcse_pass after all the dataflow analysis
4968 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4969 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4970 Compiler Design and Implementation.
4972 ??? A new pseudo reg is created to hold the reaching expression. The nice
4973 thing about the classical approach is that it would try to use an existing
4974 reg. If the register can't be adequately optimized [i.e. we introduce
4975 reload problems], one could add a pass here to propagate the new register
4978 ??? We don't handle single sets in PARALLELs because we're [currently] not
4979 able to copy the rest of the parallel when we insert copies to create full
4980 redundancies from partial redundancies. However, there's no reason why we
4981 can't handle PARALLELs in the cases where there are no partial
4982 redundancies. */
4988 int did_insert, changed;
4989 struct expr **index_map;
4992 /* Compute a mapping from expression number (`bitmap_index') to
4993 hash table entry. */
4995 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
4996 for (i = 0; i < expr_hash_table_size; i++)
4997 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4998 index_map[expr->bitmap_index] = expr;
5000 /* Reset bitmap used to track which insns are redundant. */
5001 pre_redundant_insns = sbitmap_alloc (max_cuid);
5002 sbitmap_zero (pre_redundant_insns);
5004 /* Delete the redundant insns first so that
5005 - we know what register to use for the new insns and for the other
5006 ones with reaching expressions
5007 - we know which insns are redundant when we go to create copies */
5009 changed = pre_delete ();
5011 did_insert = pre_edge_insert (edge_list, index_map);
5013 /* In other places with reaching expressions, copy the expression to the
5014 specially allocated pseudo-reg that reaches the redundant expr. */
5015 pre_insert_copies ();
5018 commit_edge_insertions ();
5023 free (pre_redundant_insns);
5027 /* Top level routine to perform one PRE GCSE pass.
5029 Return non-zero if a change was made. */
5032 one_pre_gcse_pass (pass)
5037 gcse_subst_count = 0;
5038 gcse_create_count = 0;
5040 alloc_expr_hash_table (max_cuid);
5041 add_noreturn_fake_exit_edges ();
5043 compute_ld_motion_mems ();
5045 compute_expr_hash_table ();
5046 trim_ld_motion_mems ();
5048 dump_hash_table (gcse_file, "Expression", expr_hash_table,
5049 expr_hash_table_size, n_exprs);
5053 alloc_pre_mem (n_basic_blocks, n_exprs);
5054 compute_pre_data ();
5055 changed |= pre_gcse ();
5056 free_edge_list (edge_list);
5061 remove_fake_edges ();
5062 free_expr_hash_table ();
5066 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5067 current_function_name, pass, bytes_used);
5068 fprintf (gcse_file, "%d substs, %d insns created\n",
5069 gcse_subst_count, gcse_create_count);
5075 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5076 If notes are added to an insn which references a CODE_LABEL, the
5077 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5078 because the following loop optimization pass requires them. */
5080 /* ??? This is very similar to the loop.c add_label_notes function. We
5081 could probably share code here. */
5083 /* ??? If there was a jump optimization pass after gcse and before loop,
5084 then we would not need to do this here, because jump would add the
5085 necessary REG_LABEL notes. */
5088 add_label_notes (x, insn)
5092 enum rtx_code code = GET_CODE (x);
5096 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5098 /* This code used to ignore labels that referred to dispatch tables to
5099 avoid flow generating (slightly) worse code.
5101 We no longer ignore such label references (see LABEL_REF handling in
5102 mark_jump_label for additional information). */
5104 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
5106 if (LABEL_P (XEXP (x, 0)))
5107 LABEL_NUSES (XEXP (x, 0))++;
5111 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5114 add_label_notes (XEXP (x, i), insn);
5115 else if (fmt[i] == 'E')
5116 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5117 add_label_notes (XVECEXP (x, i, j), insn);
5121 /* Compute transparent outgoing information for each block.
5123 An expression is transparent to an edge unless it is killed by
5124 the edge itself. This can only happen with abnormal control flow,
5125 when the edge is traversed through a call. This happens with
5126 non-local labels and exceptions.
5128 This would not be necessary if we split the edge. While this is
5129 normally impossible for abnormal critical edges, with some effort
5130 it should be possible with exception handling, since we still have
5131 control over which handler should be invoked. But due to increased
5132 EH table sizes, this may not be worthwhile. */
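/* For illustration: if a block ends in a call that may return through an
   abnormal edge (e.g. via a non-local goto), an expression such as
   (mem (reg 100)) is not transparent at the end of that block, since the
   call may store to that memory before control returns; a load whose
   address is a (symbol_ref ...) with CONSTANT_POOL_ADDRESS_P set remains
   transparent, which is the exception made below.  */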
5135 compute_transpout ()
5141 sbitmap_vector_ones (transpout, n_basic_blocks);
5143 for (bb = 0; bb < n_basic_blocks; ++bb)
5145 /* Note that flow inserted a nop at the end of basic blocks that
5146 end in call instructions for reasons other than abnormal
5147 control flow. */
5148 if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
5151 for (i = 0; i < expr_hash_table_size; i++)
5152 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
5153 if (GET_CODE (expr->expr) == MEM)
5155 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5156 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5159 /* ??? Optimally, we would use interprocedural alias
5160 analysis to determine if this mem is actually killed
5161 by this call. */
5162 RESET_BIT (transpout[bb], expr->bitmap_index);
5167 /* Removal of useless null pointer checks */
5169 /* Called via note_stores. X is set by SETTER. If X is a register we must
5170 invalidate nonnull_local and set nonnull_killed. DATA is really a
5171 `null_pointer_info *'.
5173 We ignore hard registers. */
5176 invalidate_nonnull_info (x, setter, data)
5178 rtx setter ATTRIBUTE_UNUSED;
5182 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5184 while (GET_CODE (x) == SUBREG)
5187 /* Ignore anything that is not a register or is a hard register. */
5188 if (GET_CODE (x) != REG
5189 || REGNO (x) < npi->min_reg
5190 || REGNO (x) >= npi->max_reg)
5193 regno = REGNO (x) - npi->min_reg;
5195 RESET_BIT (npi->nonnull_local[npi->current_block], regno);
5196 SET_BIT (npi->nonnull_killed[npi->current_block], regno);
5199 /* Do null-pointer check elimination for the registers indicated in
5200 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5201 they are not our responsibility to free. */
5204 delete_null_pointer_checks_1 (delete_list, block_reg, nonnull_avin,
5206 varray_type *delete_list;
5207 unsigned int *block_reg;
5208 sbitmap *nonnull_avin;
5209 sbitmap *nonnull_avout;
5210 struct null_pointer_info *npi;
5214 sbitmap *nonnull_local = npi->nonnull_local;
5215 sbitmap *nonnull_killed = npi->nonnull_killed;
5217 /* Compute local properties, nonnull and killed. A register will have
5218 the nonnull property if at the end of the current block its value is
5219 known to be nonnull. The killed property indicates that somewhere in
5220 the block any information we had about the register is killed.
5222 Note that a register can have both properties in a single block. That
5223 indicates that it's killed, then later in the block a new value is
5224 computed. */
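/* For illustration (register numbers are hypothetical): in a block with

       (set (reg 3) (reg 7))          -- kills: nonnull_killed bit set
       (set (reg 5) (mem (reg 3)))    -- reg 3 known nonnull from here on

   reg 3 ends the block with both its nonnull_killed and nonnull_local
   bits set.  */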
5225 sbitmap_vector_zero (nonnull_local, n_basic_blocks);
5226 sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
5228 for (current_block = 0; current_block < n_basic_blocks; current_block++)
5230 rtx insn, stop_insn;
5232 /* Set the current block for invalidate_nonnull_info. */
5233 npi->current_block = current_block;
5235 /* Scan each insn in the basic block looking for memory references and
5236 null pointer checks. */
5237 stop_insn = NEXT_INSN (BLOCK_END (current_block));
5238 for (insn = BLOCK_HEAD (current_block);
5240 insn = NEXT_INSN (insn))
5245 /* Ignore anything that is not a normal insn. */
5246 if (! INSN_P (insn))
5249 /* Basically ignore anything that is not a simple SET. We do have
5250 to make sure to invalidate nonnull_local and set nonnull_killed
5251 for such insns though. */
5252 set = single_set (insn);
5255 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5259 /* See if we've got a usable memory load. We handle it first
5260 in case it uses its address register as a dest (which kills
5261 the nonnull property). */
5262 if (GET_CODE (SET_SRC (set)) == MEM
5263 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5264 && REGNO (reg) >= npi->min_reg
5265 && REGNO (reg) < npi->max_reg)
5266 SET_BIT (nonnull_local[current_block],
5267 REGNO (reg) - npi->min_reg);
5269 /* Now invalidate stuff clobbered by this insn. */
5270 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5272 /* And handle stores, we do these last since any sets in INSN can
5273 not kill the nonnull property if it is derived from a MEM
5274 appearing in a SET_DEST. */
5275 if (GET_CODE (SET_DEST (set)) == MEM
5276 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5277 && REGNO (reg) >= npi->min_reg
5278 && REGNO (reg) < npi->max_reg)
5279 SET_BIT (nonnull_local[current_block],
5280 REGNO (reg) - npi->min_reg);
5284 /* Now compute global properties based on the local properties. This
5285 is a classic global availability algorithm. */
5286 compute_available (nonnull_local, nonnull_killed,
5287 nonnull_avout, nonnull_avin);
5289 /* Now look at each bb and see if it ends with a compare of a value
5290 against zero. */
5291 for (bb = 0; bb < n_basic_blocks; bb++)
5293 rtx last_insn = BLOCK_END (bb);
5294 rtx condition, earliest;
5295 int compare_and_branch;
5297 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5298 since BLOCK_REG[BB] is zero if this block did not end with a
5299 comparison against zero, this condition works. */
5300 if (block_reg[bb] < npi->min_reg
5301 || block_reg[bb] >= npi->max_reg)
5304 /* LAST_INSN is a conditional jump. Get its condition. */
5305 condition = get_condition (last_insn, &earliest);
5307 /* If we can't determine the condition then skip. */
5311 /* Is the register known to have a nonzero value? */
5312 if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
5315 /* Try to compute whether the compare/branch at the loop end is one or
5316 two instructions. */
5317 if (earliest == last_insn)
5318 compare_and_branch = 1;
5319 else if (earliest == prev_nonnote_insn (last_insn))
5320 compare_and_branch = 2;
5324 /* We know the register in this comparison is nonnull at exit from
5325 this block. We can optimize this comparison. */
5326 if (GET_CODE (condition) == NE)
5330 new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
5332 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5333 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5334 emit_barrier_after (new_jump);
5337 VARRAY_RTX_INIT (*delete_list, 10, "delete_list");
5339 VARRAY_PUSH_RTX (*delete_list, last_insn);
5340 if (compare_and_branch == 2)
5341 VARRAY_PUSH_RTX (*delete_list, earliest);
5343 /* Don't check this block again. (Note that BLOCK_END is
5344 invalid here; we deleted the last instruction in the
5345 block.) */
5350 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5353 This is conceptually similar to global constant/copy propagation and
5354 classic global CSE (it even uses the same dataflow equations as cprop).
5356 If a register is used as a memory address with the form (mem (reg)), then we
5357 know that REG can not be zero at that point in the program. Any instruction
5358 which sets REG "kills" this property.
5360 So, if every path leading to a conditional branch has an available memory
5361 reference of that form, then we know the register can not have the value
5362 zero at the conditional branch.
5364 So we merely need to compute the local properties and propagate that data
5365 around the cfg, then optimize where possible.
5367 We run this pass two times. Once before CSE, then again after CSE. This
5368 has proven to be the most profitable approach. It is rare for new
5369 optimization opportunities of this nature to appear after the first CSE
5372 This could probably be integrated with global cprop with a little work. */
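/* For illustration (a source-level sketch):

       x = p->field;    -- makes (mem (reg p)) available, so p is nonnull
       ...
       if (p == 0)      -- can never be taken here
         abort ();

   Provided no intervening instruction sets p on any path, the comparison
   against zero (and the compare-and-branch insns implementing it) can be
   deleted.  */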
5375 delete_null_pointer_checks (f)
5376 rtx f ATTRIBUTE_UNUSED;
5378 sbitmap *nonnull_avin, *nonnull_avout;
5379 unsigned int *block_reg;
5380 varray_type delete_list = NULL;
5386 struct null_pointer_info npi;
5388 /* If we have only a single block, then there's nothing to do. */
5389 if (n_basic_blocks <= 1)
5392 /* Trying to perform global optimizations on flow graphs which have
5393 a high connectivity will take a long time and is unlikely to be
5394 particularly useful.
5396 In normal circumstances a cfg should have about twice as many edges
5397 as blocks. But we do not want to punish small functions which have
5398 a couple switch statements. So we require a relatively large number
5399 of basic blocks and the ratio of edges to blocks to be high. */
5400 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5403 /* We need four bitmaps, each with a bit for each register in each
5404 basic block. */
5405 max_reg = max_reg_num ();
5406 regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
5408 /* Allocate bitmaps to hold local and global properties. */
5409 npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5410 npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5411 nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5412 nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5414 /* Go through the basic blocks, seeing whether or not each block
5415 ends with a conditional branch whose condition is a comparison
5416 against zero. Record the register compared in BLOCK_REG. */
5417 block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
5418 for (bb = 0; bb < n_basic_blocks; bb++)
5420 rtx last_insn = BLOCK_END (bb);
5421 rtx condition, earliest, reg;
5423 /* We only want conditional branches. */
5424 if (GET_CODE (last_insn) != JUMP_INSN
5425 || !any_condjump_p (last_insn)
5426 || !onlyjump_p (last_insn))
5429 /* LAST_INSN is a conditional jump. Get its condition. */
5430 condition = get_condition (last_insn, &earliest);
5432 /* If we were unable to get the condition, or it is not an equality
5433 comparison against zero, then there's nothing we can do. */
5435 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5436 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5437 || (XEXP (condition, 1)
5438 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5441 /* We must be checking a register against zero. */
5442 reg = XEXP (condition, 0);
5443 if (GET_CODE (reg) != REG)
5446 block_reg[bb] = REGNO (reg);
5449 /* Go through the algorithm for each block of registers. */
5450 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5453 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5454 delete_null_pointer_checks_1 (&delete_list, block_reg, nonnull_avin,
5455 nonnull_avout, &npi);
5458 /* Now delete the instructions all at once. This breaks the CFG. */
5461 for (i = 0; i < VARRAY_ACTIVE_SIZE (delete_list); i++)
5462 delete_insn (VARRAY_RTX (delete_list, i));
5463 VARRAY_FREE (delete_list);
5466 /* Free the table of registers compared at the end of every block. */
5470 sbitmap_vector_free (npi.nonnull_local);
5471 sbitmap_vector_free (npi.nonnull_killed);
5472 sbitmap_vector_free (nonnull_avin);
5473 sbitmap_vector_free (nonnull_avout);
5476 /* Code Hoisting variables and subroutines. */
5478 /* Very busy expressions. */
5479 static sbitmap *hoist_vbein;
5480 static sbitmap *hoist_vbeout;
5482 /* Hoistable expressions. */
5483 static sbitmap *hoist_exprs;
5485 /* Dominator bitmaps. */
5486 static sbitmap *dominators;
5488 /* ??? We could compute post dominators and run this algorithm in
5489 reverse to perform tail merging; doing so would probably be
5490 more effective than the tail merging code in jump.c.
5492 It's unclear if tail merging could be run in parallel with
5493 code hoisting. It would be nice. */
5495 /* Allocate vars used for code hoisting analysis. */
5498 alloc_code_hoist_mem (n_blocks, n_exprs)
5499 int n_blocks, n_exprs;
5501 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5502 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5503 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5505 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5506 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5507 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5508 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5510 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5513 /* Free vars used for code hoisting analysis. */
5516 free_code_hoist_mem ()
5518 sbitmap_vector_free (antloc);
5519 sbitmap_vector_free (transp);
5520 sbitmap_vector_free (comp);
5522 sbitmap_vector_free (hoist_vbein);
5523 sbitmap_vector_free (hoist_vbeout);
5524 sbitmap_vector_free (hoist_exprs);
5525 sbitmap_vector_free (transpout);
5527 sbitmap_vector_free (dominators);
5530 /* Compute the very busy expressions at entry/exit from each block.
5532 An expression is very busy if all paths from a given point
5533 compute the expression. */
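/* For reference, the equations iterated below are, in sbitmap terms:

       VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))
       VBEOUT(bb) = intersection of VBEIN over the successors of bb

   with VBEOUT of the last block left empty, repeated until no bit
   changes.  */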
5536 compute_code_hoist_vbeinout ()
5538 int bb, changed, passes;
5540 sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
5541 sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
5550 /* We scan the blocks in the reverse order to speed up
5551 the convergence. */
5552 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5554 changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
5555 hoist_vbeout[bb], transp[bb]);
5556 if (bb != n_basic_blocks - 1)
5557 sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
5564 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5567 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5570 compute_code_hoist_data ()
5572 compute_local_properties (transp, comp, antloc, 0);
5573 compute_transpout ();
5574 compute_code_hoist_vbeinout ();
5575 calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
5577 fprintf (gcse_file, "\n");
5580 /* Determine if the expression identified by EXPR_INDEX would
5581 reach BB unimpaired if it was placed at the end of EXPR_BB.

5583 It's unclear exactly what Muchnick meant by "unimpaired". It seems
5584 to me that the expression must either be computed or transparent in
5585 *every* block in the path(s) from EXPR_BB to BB. Any other definition
5586 would allow the expression to be hoisted out of loops, even if
5587 the expression wasn't a loop invariant.
5589 Contrast this to reachability for PRE where an expression is
5590 considered reachable if *any* path reaches instead of *all*
5591 paths. */
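/* For illustration: given a diamond

            [A]
           /   \
         [B]   [C]
           \   /
            [D]  computes E

   hoisting E from D into A requires E to be computed or transparent in
   both B and C; PRE-style reachability would be satisfied by one such
   path alone.  */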
5594 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5595 basic_block expr_bb;
5601 int visited_allocated_locally = 0;
5604 if (visited == NULL)
5606 visited_allocated_locally = 1;
5607 visited = xcalloc (n_basic_blocks, 1);
5610 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5612 basic_block pred_bb = pred->src;
5614 if (pred->src == ENTRY_BLOCK_PTR)
5616 else if (visited[pred_bb->index])
5619 /* Does this predecessor generate this expression? */
5620 else if (TEST_BIT (comp[pred_bb->index], expr_index))
5622 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
5628 visited[pred_bb->index] = 1;
5629 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5634 if (visited_allocated_locally)
5637 return (pred == NULL);
5640 /* Actually perform code hoisting. */
5647 struct expr **index_map;
5650 sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
5652 /* Compute a mapping from expression number (`bitmap_index') to
5653 hash table entry. */
5655 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5656 for (i = 0; i < expr_hash_table_size; i++)
5657 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5658 index_map[expr->bitmap_index] = expr;
5660 /* Walk over each basic block looking for potentially hoistable
5661 expressions; nothing gets hoisted from the entry block. */
5662 for (bb = 0; bb < n_basic_blocks; bb++)
5665 int insn_inserted_p;
5667 /* Examine each expression that is very busy at the exit of this
5668 block. These are the potentially hoistable expressions. */
5669 for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
5673 if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
5675 /* We've found a potentially hoistable expression, now
5676 we look at every block BB dominates to see if it
5677 computes the expression. */
5678 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5680 /* Ignore self dominance. */
5682 || ! TEST_BIT (dominators[dominated], bb))
5685 /* We've found a dominated block, now see if it computes
5686 the busy expression and whether or not moving that
5687 expression to the "beginning" of that block is safe. */
5688 if (!TEST_BIT (antloc[dominated], i))
5691 /* Note if the expression would reach the dominated block
5692 unimpaired if it was placed at the end of BB.
5694 Keep track of how many times this expression is hoistable
5695 from a dominated block into BB. */
5696 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5697 BASIC_BLOCK (dominated), NULL))
5701 /* If we found more than one hoistable occurrence of this
5702 expression, then note it in the bitmap of expressions to
5703 hoist. It makes no sense to hoist things which are computed
5704 in only one BB, and doing so tends to pessimize register
5705 allocation. One could increase this value to try harder
5706 to avoid any possible code expansion due to register
5707 allocation issues; however experiments have shown that
5708 the vast majority of hoistable expressions are only movable
5709 from two successors, so raising this threshold is likely
5710 to nullify any benefit we get from code hoisting. */
5713 SET_BIT (hoist_exprs[bb], i);
5719 /* If we found nothing to hoist, then quit now. */
5723 /* Loop over all the hoistable expressions. */
5724 for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
5726 /* We want to insert the expression into BB only once, so
5727 note when we've inserted it. */
5728 insn_inserted_p = 0;
5730 /* These tests should be the same as the tests above. */
5731 if (TEST_BIT (hoist_vbeout[bb], i))
5733 /* We've found a potentially hoistable expression, now
5734 we look at every block BB dominates to see if it
5735 computes the expression. */
5736 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5738 /* Ignore self dominance. */
5740 || ! TEST_BIT (dominators[dominated], bb))
5743 /* We've found a dominated block, now see if it computes
5744 the busy expression and whether or not moving that
5745 expression to the "beginning" of that block is safe. */
5746 if (!TEST_BIT (antloc[dominated], i))
5749 /* The expression is computed in the dominated block and
5750 it would be safe to compute it at the start of the
5751 dominated block. Now we have to determine if the
5752 expression would reach the dominated block if it was
5753 placed at the end of BB. */
5754 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5755 BASIC_BLOCK (dominated), NULL))
5757 struct expr *expr = index_map[i];
5758 struct occr *occr = expr->antic_occr;
5762 /* Find the right occurrence of this expression. */
5763 while (occr && BLOCK_NUM (occr->insn) != dominated)
5766 /* Should never happen. */
5772 set = single_set (insn);
5776 /* Create a pseudo-reg to store the result of reaching
5777 expressions into. Get the mode for the new pseudo
5778 from the mode of the original destination pseudo. */
5779 if (expr->reaching_reg == NULL)
5781 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5783 /* In theory this should never fail since we're creating
5784 a reg->reg copy.
5786 However, on the x86 some of the movXX patterns
5787 actually contain clobbers of scratch regs. This may
5788 cause the insn created by validate_change to not
5789 match any pattern and thus cause validate_change to
5791 if (validate_change (insn, &SET_SRC (set),
5792 expr->reaching_reg, 0))
5794 occr->deleted_p = 1;
5795 if (!insn_inserted_p)
5797 insert_insn_end_bb (index_map[i],
5798 BASIC_BLOCK (bb), 0);
5799 insn_inserted_p = 1;
5811 /* Top level routine to perform one code hoisting (aka unification) pass.
5813 Return non-zero if a change was made. */
5816 one_code_hoisting_pass ()
5820 alloc_expr_hash_table (max_cuid);
5821 compute_expr_hash_table ();
5823 dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
5824 expr_hash_table_size, n_exprs);
5828 alloc_code_hoist_mem (n_basic_blocks, n_exprs);
5829 compute_code_hoist_data ();
5831 free_code_hoist_mem ();
5834 free_expr_hash_table ();
5839 /* Here we provide the things required to do store motion towards
5840 the exit. In order for this to be effective, gcse also needed to
5841 be taught how to move a load when it is killed only by a store to itself.
5843 int i;
5844 float a[10];

5846 void foo(float scale)
5847 {
5848 for (i=0; i<10; i++)
5849 a[i] *= scale;
5850 }
5852 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5853 the load out since it's live around the loop, and stored at the bottom
5856 The 'Load Motion' referred to and implemented in this file is
5857 an enhancement to gcse which when using edge based lcm, recognizes
5858 this situation and allows gcse to move the load out of the loop.
5860 Once gcse has hoisted the load, store motion can then push this
5861 load towards the exit, and we end up with no loads or stores of 'i'
5862 in the loop. */
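/* For illustration, the net effect on the example above is roughly this
   (a source-level sketch, not actual compiler output):

       for (r = 0; r < 10; r++)
         a[r] *= scale;
       i = r;            -- the only remaining store of 'i'

   with the loop body itself performing no loads or stores of 'i'.  */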
5864 /* This will search the ldst list for a matching expression. If it
5865 doesn't find one, we create one and initialize it. */
5867 static struct ls_expr *
5871 struct ls_expr * ptr;
5873 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5874 if (expr_equiv_p (ptr->pattern, x))
5879 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
5881 ptr->next = pre_ldst_mems;
5884 ptr->loads = NULL_RTX;
5885 ptr->stores = NULL_RTX;
5886 ptr->reaching_reg = NULL_RTX;
5889 ptr->hash_index = 0;
5890 pre_ldst_mems = ptr;
5896 /* Free up an individual ldst entry. */
5899 free_ldst_entry (ptr)
5900 struct ls_expr * ptr;
5903 free_INSN_LIST_list (&ptr->stores);
5904 free_INSN_LIST_list (&ptr->loads);
5908 /* Free up all memory associated with the ldst list. */
5913 while (pre_ldst_mems)
5915 struct ls_expr * tmp = pre_ldst_mems;
5917 pre_ldst_mems = pre_ldst_mems->next;
5919 free_ldst_entry (tmp);
5922 pre_ldst_mems = NULL;
5925 /* Dump debugging info about the ldst list. */
5928 print_ldst_list (file)
5931 struct ls_expr * ptr;
5933 fprintf (file, "LDST list: \n");
5935 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5937 fprintf (file, " Pattern (%3d): ", ptr->index);
5939 print_rtl (file, ptr->pattern);
5941 fprintf (file, "\n Loads : ");
5944 print_rtl (file, ptr->loads);
5946 fprintf (file, "(nil)");
5948 fprintf (file, "\n Stores : ");
5951 print_rtl (file, ptr->stores);
5953 fprintf (file, "(nil)");
5955 fprintf (file, "\n\n");
5958 fprintf (file, "\n");
5961 /* Return the entry matching X in the list of ldst only expressions, if any. */
5963 static struct ls_expr *
5964 find_rtx_in_ldst (x)
5967 struct ls_expr * ptr;
5969 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5970 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
5976 /* Assign each element of the list of mems a monotonically increasing value. */
5981 struct ls_expr * ptr;
5984 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5990 /* Return first item in the list. */
5992 static inline struct ls_expr *
5995 return pre_ldst_mems;
5998 /* Return the next item in the list after the specified one. */
6000 static inline struct ls_expr *
6002 struct ls_expr * ptr;
6007 /* Load Motion for loads which only kill themselves. */
6009 /* Return true if x is a simple MEM operation, with no registers or
6010 side effects. These are the types of loads we consider for the
6011 ld_motion list, otherwise we let the usual aliasing take care of it. */
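/* For illustration: (mem:SI (symbol_ref "x")) qualifies as a simple mem,
   since its address cannot vary; (mem:SI (reg 100)) does not, and neither
   does a volatile or BLKmode reference.  */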
6017 if (GET_CODE (x) != MEM)
6020 if (MEM_VOLATILE_P (x))
6023 if (GET_MODE (x) == BLKmode)
6026 /* See comment in find_moveable_store */
6027 if (!rtx_addr_varies_p (XEXP (x, 0), 0))
6033 /* Make sure there isn't a buried reference in this pattern anywhere.
6034 If there is, invalidate the entry for it since we're not capable
6035 of fixing it up just yet. We have to be sure we know about ALL
6036 loads since the aliasing code will allow all entries in the
6037 ld_motion list to not-alias itself. If we miss a load, we will get
6038 the wrong value since gcse might common it and we won't know to
6039 fix it up. */
6042 invalidate_any_buried_refs (x)
6047 struct ls_expr * ptr;
6049 /* Invalidate it in the list. */
6050 if (GET_CODE (x) == MEM && simple_mem (x))
6052 ptr = ldst_entry (x);
6056 /* Recursively process the insn. */
6057 fmt = GET_RTX_FORMAT (GET_CODE (x));
6059 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6062 invalidate_any_buried_refs (XEXP (x, i));
6063 else if (fmt[i] == 'E')
6064 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6065 invalidate_any_buried_refs (XVECEXP (x, i, j));
6069 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6070 being defined as MEM loads and stores to symbols, with no
6071 side effects and no registers in the expression. If there are any
6072 uses/defs which don't match these criteria, it is invalidated and
6073 trimmed out later. */
6076 compute_ld_motion_mems ()
6078 struct ls_expr * ptr;
6082 pre_ldst_mems = NULL;
6084 for (bb = 0; bb < n_basic_blocks; bb++)
6086 for (insn = BLOCK_HEAD (bb);
6087 insn && insn != NEXT_INSN (BLOCK_END (bb));
6088 insn = NEXT_INSN (insn))
6090 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6092 if (GET_CODE (PATTERN (insn)) == SET)
6094 rtx src = SET_SRC (PATTERN (insn));
6095 rtx dest = SET_DEST (PATTERN (insn));
6097 /* Check for a simple LOAD... */
6098 if (GET_CODE (src) == MEM && simple_mem (src))
6100 ptr = ldst_entry (src);
6101 if (GET_CODE (dest) == REG)
6102 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6108 /* Make sure there isn't a buried load somewhere. */
6109 invalidate_any_buried_refs (src);
6111 /* Check for stores. Don't worry about aliased ones, they
6112 will block any movement we might do later. We only care
6113 about this exact pattern since it is the only
6114 circumstance in which we will ignore the aliasing info.
6115 if (GET_CODE (dest) == MEM && simple_mem (dest))
6117 ptr = ldst_entry (dest);
6119 if (GET_CODE (src) != MEM
6120 && GET_CODE (src) != ASM_OPERANDS)
6121 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6127 invalidate_any_buried_refs (PATTERN (insn));
6133 /* Remove any references that have been either invalidated or are not in the
6134 expression list for pre gcse. */
6137 trim_ld_motion_mems ()
6139 struct ls_expr * last = NULL;
6140 struct ls_expr * ptr = first_ls_expr ();
6144 int del = ptr->invalid;
6145 struct expr * expr = NULL;
6147 /* Delete if entry has been made invalid. */
6153 /* Delete if we cannot find this mem in the expression list. */
6154 for (i = 0; i < expr_hash_table_size && del; i++)
6156 for (expr = expr_hash_table[i];
6158 expr = expr->next_same_hash)
6159 if (expr_equiv_p (expr->expr, ptr->pattern))
6171 last->next = ptr->next;
6172 free_ldst_entry (ptr);
6177 pre_ldst_mems = pre_ldst_mems->next;
6178 free_ldst_entry (ptr);
6179 ptr = pre_ldst_mems;
6184 /* Set the expression field if we are keeping it. */
6191 /* Show the world what we've found. */
6192 if (gcse_file && pre_ldst_mems != NULL)
6193 print_ldst_list (gcse_file);
6196 /* This routine will take an expression which we are replacing with
6197 a reaching register, and update any stores that are needed if
6198 that expression is in the ld_motion list. Stores are updated by
6199 copying their SRC to the reaching register, and then storing
6200 the reaching register into the store location. This keeps the
6201 correct value in the reaching register for the loads. */
6204 update_ld_motion_stores (expr)
6207 struct ls_expr * mem_ptr;
6209 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6211 /* We can try to find just the REACHED stores, but it shouldn't
6212 matter if we set the reaching reg everywhere... some might be
6213 dead and should be eliminated later. */
6215 /* We replace SET mem = expr with
6216 SET reg = expr
6217 SET mem = reg, where reg is the
6218 reaching reg used in the load. */
6219 rtx list = mem_ptr->stores;
6221 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6223 rtx insn = XEXP (list, 0);
6224 rtx pat = PATTERN (insn);
6225 rtx src = SET_SRC (pat);
6226 rtx reg = expr->reaching_reg;
6229 /* If we've already copied it, continue. */
6230 if (expr->reaching_reg == src)
6235 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6236 print_rtl (gcse_file, expr->reaching_reg);
6237 fprintf (gcse_file, ":\n ");
6238 print_inline_rtx (gcse_file, insn, 8);
6239 fprintf (gcse_file, "\n");
6242 copy = gen_move_insn (reg, SET_SRC (pat));
6243 new = emit_insn_before (copy, insn);
6244 record_one_set (REGNO (reg), new);
6245 set_block_for_new_insns (new, BLOCK_FOR_INSN (insn));
6246 SET_SRC (pat) = reg;
6248 /* un-recognize this pattern since it's probably different now. */
6249 INSN_CODE (insn) = -1;
6250 gcse_create_count++;
6255 /* Store motion code. */
6257 /* Used in computing the reverse edge graph bit vectors. */
6258 static sbitmap * st_antloc;
6260 /* Global holding the number of store expressions we are dealing with. */
6261 static int num_stores;
6264 /* Mark in the sbitmap USED which registers are used by the mem X. */
6266 mark_mem_regs (x, used)
6270 register const char *fmt;
6273 if (GET_CODE (x) == REG)
6275 if (!TEST_BIT (used, REGNO (x)))
6277 SET_BIT (used, REGNO (x));
6283 fmt = GET_RTX_FORMAT (GET_CODE (x));
6284 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6288 if (mark_mem_regs (XEXP (x, i),used))
6291 else if (fmt[i] == 'E')
6292 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6293 if (mark_mem_regs (XVECEXP (x, i, j),used))
6301 /* Return non-zero if the register operands of expression X are killed
6302 before/after insn in basic block BB. */
6305 store_ops_ok (x, bb,insn, before)
6315 /* Repeat is used to turn tail-recursion into iteration. */
6321 code = GET_CODE (x);
6326 /* Okay, since the reg def chains are ordered by bb/insn
6327 (since that's how it calculates them, and even if it didn't,
6328 we could just sort them), we just walk until we find a def
6329 in our BB, then walk until we find a def after/before our
6330 insn, and if we find a reg def after/before our insn, in the
6331 same bb, we return the appropriate value. If there is no
6332 such def, to prevent walking *every* reg def, we stop once
6333 we are out of our BB again. */
6334 struct df_link *currref;
6335 bool thereyet = FALSE;
6336 for (currref = df_analyzer->regs[REGNO(x)].defs;
6338 currref = currref->next)
6340 if (DF_REF_BB (currref->ref) != bb)
6349 if (INSN_UID (DF_REF_INSN (currref->ref)) >= INSN_UID (insn))
6354 if (INSN_UID (DF_REF_INSN (currref->ref)) < INSN_UID (insn))
6358 if (DF_REF_TYPE (currref->ref) == DF_REF_REG_DEF)
6390 i = GET_RTX_LENGTH (code) - 1;
6391 fmt = GET_RTX_FORMAT (code);
6397 rtx tem = XEXP (x, i);
6399 /* If we are about to do the last recursive call
6400 needed at this level, change it into iteration.
6401 This function is called enough to be worth it. */
6408 if (! store_ops_ok (tem, bb, insn, before))
6411 else if (fmt[i] == 'E')
6415 for (j = 0; j < XVECLEN (x, i); j++)
6417 if (! store_ops_ok (XVECEXP (x, i, j), bb, insn, before))
6426 /* Determine whether insn is MEM store pattern that we will consider
6427 moving. We'll consider moving pretty much anything that we can
6431 find_moveable_store (insn)
6434 struct ls_expr * ptr;
6435 rtx dest = PATTERN (insn);
6437 /* If it's not a SET, it's not a MEM store we want to consider.
6438 Also, if it's an ASM, we certainly don't want to try to touch
6439 it. */
6440 if (GET_CODE (dest) != SET
6441 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6444 dest = SET_DEST (dest);
6446 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6447 || GET_MODE (dest) == BLKmode)
6450 /* ??? Is this conservative, or just correct? We get more
6451 *candidates* without it, but I don't think we ever remove any
6452 stores where the address did vary. */
6453 if (rtx_addr_varies_p (XEXP (dest, 0), 0))
6456 ptr = ldst_entry (dest);
6457 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6460 /* Perform store motion.
6461 Store motion is modeled as a lazy code motion problem, like PRE is
6462 above. The main difference is that we want to move stores down as far
6463 as possible, so we have LCM work on the reverse flowgraph. */
6466 compute_store_table ()
6470 max_gcse_regno = max_reg_num ();
6473 /* Find all the stores we care about. */
6474 for (bb = 0; bb < n_basic_blocks; bb++)
6476 for (insn = BLOCK_END (bb);
6477 insn && insn != PREV_INSN (BLOCK_HEAD (bb));
6478 insn = PREV_INSN (insn))
6480 /* Ignore anything that is not a normal insn. */
6484 pat = PATTERN (insn);
6485 /* Now that we've marked regs, look for stores. */
6486 if (GET_CODE (pat) == SET)
6487 find_moveable_store (insn);
6491 ret = enumerate_ldsts ();
6495 fprintf (gcse_file, "Store Motion Expressions.\n");
6496 print_ldst_list (gcse_file);
6502 /* Check to see if the load X is aliased with STORE_PATTERN.
6503 If it is, it means that the load kills the store. */
6506 load_kills_store (x, store_pattern)
6507 rtx x, store_pattern;
6509 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
6514 /* Go through the entire insn X, looking for any loads which might
6515 alias, and therefore, kill, STORE_PATTERN. Return 1 if found. */
6518 find_loads (x, store_pattern)
6519 rtx x, store_pattern;
6525 if (GET_CODE (x) == SET)
6528 if (GET_CODE (x) == MEM)
6530 if (load_kills_store (x, store_pattern))
6534 /* Recursively process the insn. */
6535 fmt = GET_RTX_FORMAT (GET_CODE (x));
6537 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
6540 ret |= find_loads (XEXP (x, i), store_pattern);
6541 else if (fmt[i] == 'E')
6542 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6543 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
6548 /* Check if INSN kills the store pattern X (is aliased with it).
6549 Return 1 if it does. */
6552 store_killed_in_insn (x, insn)
6555 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6558 if (GET_CODE (insn) == CALL_INSN)
6560 if (CONST_CALL_P (insn))
6566 if (GET_CODE (PATTERN (insn)) == SET)
6568 rtx pat = PATTERN (insn);
6569 /* Check for memory stores to aliased objects. */
6570 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
6572 if (find_loads (SET_DEST (pat), x))
6575 return find_loads (SET_SRC (pat), x);
6578 return find_loads (PATTERN (insn), x);
6581 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6582 within basic block BB. */
6585 store_killed_after (x, insn, bb, testops)
6596 /* Check if the register operands of the store are OK in this block. */
6597 if (!store_ops_ok (XEXP (x, 0), bb, insn, 0))
6601 insn && insn != NEXT_INSN (last);
6602 insn = NEXT_INSN (insn))
6603 if (store_killed_in_insn (x, insn))
6609 /* Returns 1 if the expression X is loaded or clobbered before INSN
6610 within basic block BB. */
6612 store_killed_before (x, insn, bb)
6616 rtx first = bb->head;
6619 return store_killed_in_insn (x, insn);
6620 /* Check if the register operands of the store are OK in this block. */
6621 if (!store_ops_ok (XEXP (x, 0), bb, insn, 1))
6624 for (insn = PREV_INSN (insn) ;
6625 insn && insn != PREV_INSN (first);
6626 insn = PREV_INSN (insn))
6628 if (store_killed_in_insn (x, insn))
6634 #define ANTIC_STORE_LIST(x) ((x)->loads)
6635 #define AVAIL_STORE_LIST(x) ((x)->stores)
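/* Note: for a store expression the `loads' field of the ls_expr is
   otherwise unused, so ANTIC_STORE_LIST borrows it to hold the
   anticipatable stores, while the available stores stay in `stores'.  */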
6637 /* Given the table of available store insns at the end of blocks,
6638 determine which ones are not killed by aliasing, and generate
6639 the appropriate vectors for gen and killed. */
6641 build_store_vectors ()
6646 struct ls_expr * ptr;
6647 sbitmap tested, *result;
6650 /* Build the gen_vector. This is any store in the table which is not killed
6651 by aliasing later in its block. */
6652 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6653 sbitmap_vector_zero (ae_gen, n_basic_blocks);
6655 st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6656 sbitmap_vector_zero (st_antloc, n_basic_blocks);
6658 /* Note: In case someone needs something to optimize about store
6659 motion, here's the next place to look. We currently test one more
6660 basic block per store than necessary (at least). Since we know, at
6661 the end of this for loop, whether a store was killed in one of the
6662 basic blocks (We know both whether it's killed before, and killed
6663 after, the insn in the bb it resides in. So unless the insn
6664 consists of multiple store/loads, we know whether it was killed
6665 in the entire bb), we could avoid testing it for kill and transp in
6666 the next for loop. */
6667 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6669 /* Put all the stores into either the antic list, or the avail list,
6670 or both. */
6671 rtx store_list = ptr->stores;
6672 ptr->stores = NULL_RTX;
6674 for (st = store_list; st != NULL; st = XEXP (st, 1))
6676 insn = XEXP (st, 0);
6677 bb = BLOCK_FOR_INSN (insn);
6678 if (!store_killed_after (ptr->pattern, insn, bb, 1))
6680 /* If we've already seen an available expression in this block,
6681 we can delete the one we saw already (it occurs earlier in
6682 the block) and replace it with this one. We'll copy the
6683 old SRC expression to an unused register in case there
6684 are any side effects. */
6685 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6687 /* Find previous store. */
6689 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
6690 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
6694 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6696 fprintf (gcse_file, "Removing redundant store:\n");
6697 replace_store_insn (r, XEXP (st, 0), bb);
6698 XEXP (st, 0) = insn;
6702 SET_BIT (ae_gen[bb->index], ptr->index);
6703 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6704 AVAIL_STORE_LIST (ptr));
6707 if (!store_killed_before (ptr->pattern, insn, bb))
6709 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
6710 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6711 ANTIC_STORE_LIST (ptr));
6715 /* Free the original list of store insns. */
6716 free_INSN_LIST_list (&store_list);
  ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (ae_kill, n_basic_blocks);

  transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_ones (transp, n_basic_blocks);

  tested = sbitmap_alloc (max_gcse_regno);
  sbitmap_zero (tested);

  result = sbitmap_vector_alloc (n_basic_blocks, max_gcse_regno);
  sbitmap_vector_zero (result, n_basic_blocks);

  used = sbitmap_alloc (max_gcse_regno);
  sbitmap_zero (used);
  /* This whole big nasty thing computes kill and transparent.
     It's done in this nasty way because profiling showed store motion
     taking twice as long as GCSE, with the cause being 1 million calls
     to store_ops_ok taking 30% of the entire runtime of the compiler.

     Since most store expressions use the same registers, there's no
     point in checking them 8 million times for the same basic blocks.
     If they weren't okay in a BB the last time we checked, they won't
     be okay now.  Since we check all the bb's on each iteration, we
     don't need a vector of which registers we've tested, just the
     results.  We then reuse the result store_ops_ok gave for a given
     reg and bb, and if the result was a kill, we don't even need to
     check whether the store was killed in the basic block; it'll be in
     the kill set anyway, because its regs changed between here and
     there.

     If the store has no register operands, we just skip store_ops_ok
     entirely (since it only checks reg operands) and proceed to see if
     the store is okay in each bb, setting the appropriate bits.

     With this in place, we now take almost no time at all to perform
     store motion.  (It's not on the first page of the profile; it
     takes less than a second.)  */
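  /* In outline, the caching scheme below is (a sketch, not extra code):

	for each store expression PTR
	  for each register I used by PTR's address
	    if I is in TESTED
	      reuse RESULT[bb][I] for every bb
	    else
	      run store_ops_ok once per bb, record it in RESULT and TESTED
	  a negative cached result for any such register in bb marks PTR
	  killed (and possibly not transparent) there without rescanning
	  the block's insns.  */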
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      /* Make sure we don't have a load-only expr, which we never seem
	 to, but I don't think there's actually a guarantee.  */
      if (ptr->stores != NULL)
	{
	  /* First mark the regs used by the mem.  */
	  mark_mem_regs (ptr->pattern, used);

	  /* Now see if it had any regs.  */
	  if (sbitmap_first_set_bit (used) != -1)
	    {
	      /* For each register, see if we've tested it.  */
	      EXECUTE_IF_SET_IN_SBITMAP (used, 0, i,
		{
		  if (TEST_BIT (tested, i))
		    {
		      /* Already tested the register, so check the
			 result, and if we had an okay result, check the
			 store itself.  */
		      for (j = 0; j < n_basic_blocks; j++)
			{
			  if (!TEST_BIT (result[j], i)
			      || store_killed_after (ptr->pattern, BLOCK_HEAD (j),
						     BASIC_BLOCK (j), FALSE))
			    {
			      SET_BIT (ae_kill[j], ptr->index);
			      if (!TEST_BIT (ae_gen[j], ptr->index)
				  || !TEST_BIT (st_antloc[j], ptr->index))
				RESET_BIT (transp[j], ptr->index);
			    }
			}
		    }
		  else
		    {
		      /* We haven't tested it yet, so mark it tested,
			 and perform the tests.  */
		      SET_BIT (tested, i);

		      /* Check if it's okay in each BB.  */
		      for (j = 0; j < n_basic_blocks; j++)
			{
			  if (store_ops_ok (XEXP (ptr->pattern, 0),
					    BASIC_BLOCK (j), BLOCK_HEAD (j), 0))
			    SET_BIT (result[j], i);
			  else
			    {
			      /* It's not okay, so it's killed and maybe
				 not transparent.  */
			      SET_BIT (ae_kill[j], ptr->index);
			      if (!TEST_BIT (ae_gen[j], ptr->index)
				  || !TEST_BIT (st_antloc[j], ptr->index))
				RESET_BIT (transp[j], ptr->index);
			      continue;
			    }

			  /* The ops were okay, so check the store
			     itself.  */
			  if (store_killed_after (ptr->pattern, BLOCK_HEAD (j),
						  BASIC_BLOCK (j), FALSE))
			    {
			      SET_BIT (ae_kill[j], ptr->index);
			      if (!TEST_BIT (ae_gen[j], ptr->index)
				  || !TEST_BIT (st_antloc[j], ptr->index))
				RESET_BIT (transp[j], ptr->index);
			    }
			}
		    }
		});

	      /* Reset the used list.  */
	      sbitmap_zero (used);
	    }
	  else
	    {
	      /* If it had no registers, we come here, and do the
		 appropriate testing.  */
	      for (j = 0; j < n_basic_blocks; j++)
		{
		  if (store_killed_after (ptr->pattern, BLOCK_HEAD (j),
					  BASIC_BLOCK (j), FALSE))
		    {
		      SET_BIT (ae_kill[j], ptr->index);
		      if (!TEST_BIT (ae_gen[j], ptr->index)
			  || !TEST_BIT (st_antloc[j], ptr->index))
			RESET_BIT (transp[j], ptr->index);
		    }
		}
	    }
	}
    }

  sbitmap_free (tested);
  sbitmap_free (used);
  sbitmap_vector_free (result);
}
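/* At this point ae_gen, ae_kill, transp and st_antloc hold the local
   properties for each block; store_motion below feeds them to the
   edge-based reverse LCM routine to compute the global insertion and
   deletion maps.  */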
/* Insert an instruction at the beginning of a basic block, and update
   the BLOCK_HEAD if needed.  */

static void
insert_insn_start_bb (insn, bb)
     rtx insn;
     basic_block bb;
{
  /* Insert at start of successor block.  */
  rtx prev = PREV_INSN (bb->head);
  rtx before = bb->head;

  while (before != 0)
    {
      if (GET_CODE (before) != CODE_LABEL
	  && (GET_CODE (before) != NOTE
	      || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
	break;
      prev = before;
      if (prev == bb->end)
	break;
      before = NEXT_INSN (before);
    }

  insn = emit_insn_after (insn, prev);

  if (prev == bb->end)
    bb->end = insn;

  set_block_for_new_insns (insn, bb);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION  insert store at start of BB %d:\n",
	       bb->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }
}
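/* For illustration (an assumed example, not from the original sources):
   in a block beginning

	(code_label 23)
	(note 24 NOTE_INSN_BASIC_BLOCK)
	(insn 25 ...)

   the store is emitted between note 24 and insn 25, so the label and
   basic block note stay first and bb->head is unchanged.  */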
/* This routine will insert a store on an edge.  EXPR is the ldst entry for
   the memory reference, and E is the edge to insert it on.  Returns non-zero
   if an edge insertion was performed.  */

static int
insert_store (expr, e)
     struct ls_expr * expr;
     edge e;
{
  rtx reg, insn;
  basic_block bb;
  edge tmp;

  /* We did all the deletes before this insert, so if we didn't delete a
     store, then we haven't set the reaching reg yet either.  */
  if (expr->reaching_reg == NULL_RTX)
    return 0;

  reg = expr->reaching_reg;
  insn = gen_move_insn (expr->pattern, reg);

  /* If we are inserting this expression on ALL predecessor edges of a BB,
     insert it at the start of the BB, and reset the insert bits on the other
     edges so we don't try to insert it on the other edges.  */
  bb = e->dest;
  for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
    {
      int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
      if (index == EDGE_INDEX_NO_EDGE)
	abort ();
      if (! TEST_BIT (pre_insert_map[index], expr->index))
	break;
    }

  /* If tmp is NULL, we found an insertion on every edge, blank the
     insertion vector for these edges, and insert at the start of the BB.  */
  if (!tmp && bb != EXIT_BLOCK_PTR)
    {
      for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
	{
	  int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
	  RESET_BIT (pre_insert_map[index], expr->index);
	}
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  /* We can't insert on this edge, so we'll insert at the head of the
     successors block.  See Morgan, sec 10.5.  */
  if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
    {
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  insert_insn_on_edge (insn, e);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION  insert insn on edge (%d, %d):\n",
	       e->src->index, e->dest->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  return 1;
}
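/* For illustration: if LCM requests the same store on every incoming
   edge of block B,

	A   C
	 \ /
	  B

   inserting once at the start of B is equivalent to splitting both
   edges, so the insert bits for (A,B) and (C,B) are cleared above and a
   single insn is emitted in B instead.  */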
/* This routine will replace a store with a SET to a specified register.  */

static void
replace_store_insn (reg, del, bb)
     rtx reg, del;
     basic_block bb;
{
  rtx insn;

  insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
  insn = emit_insn_after (insn, del);
  set_block_for_new_insns (insn, bb);

  if (gcse_file)
    {
      fprintf (gcse_file,
	       "STORE_MOTION  delete insn in BB %d:\n   ", bb->index);
      print_inline_rtx (gcse_file, del, 6);
      fprintf (gcse_file, "\nSTORE_MOTION  replaced with insn:\n   ");
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  if (bb->head == del)
    bb->head = insn;
  if (bb->end == del)
    bb->end = insn;

  delete_insn (del);
}
/* Delete a store, but copy the value that would have been stored into
   the reaching_reg for later storing.  */

static void
delete_store (expr, bb)
     struct ls_expr * expr;
     basic_block bb;
{
  rtx reg, i, del;

  if (expr->reaching_reg == NULL_RTX)
    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));

  /* If there is more than 1 store, the earlier ones will be dead,
     but it doesn't hurt to replace them here.  */
  reg = expr->reaching_reg;

  for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
    {
      del = XEXP (i, 0);
      if (BLOCK_FOR_INSN (del) == bb)
	{
	  /* We know there is only one since we deleted redundant
	     ones during the available computation.  */
	  replace_store_insn (reg, del, bb);
	  break;
	}
    }
}
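/* For illustration (an assumed example, not from the original sources):
   sinking a store out of a loop,

	loop:				loop:
	  *p = x;			  r = x;
	  if (--n) goto loop;		  if (--n) goto loop;
					*p = r;

   delete_store rewrites the store in the loop as a copy into the
   reaching register R, and insert_store later emits the single store
   on the loop exit edge.  */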
/* Free memory used by store motion.  */

static void
free_store_memory ()
{
  free_ldst_mems ();

  if (ae_gen)
    sbitmap_vector_free (ae_gen);
  if (ae_kill)
    sbitmap_vector_free (ae_kill);
  if (transp)
    sbitmap_vector_free (transp);
  if (st_antloc)
    sbitmap_vector_free (st_antloc);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  ae_gen = ae_kill = transp = st_antloc = NULL;
  pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
}
/* Perform store motion.  Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.  */

static void
store_motion ()
{
  int i, x;
  struct ls_expr * ptr;
  sbitmap trapping_expr;
  int update_flow = 0;

  if (gcse_file)
    {
      fprintf (gcse_file, "before store motion\n");
      print_rtl (gcse_file, get_insns ());
    }

  init_alias_analysis ();
  df_analyzer = df_init ();
  df_analyse (df_analyzer, 0, DF_RD_CHAIN | DF_HARD_REGS);

  /* Find all the stores that are live to the end of their block.  */
  num_stores = compute_store_table ();
  if (num_stores == 0)
    {
      df_finish (df_analyzer);
      end_alias_analysis ();
      return;
    }
  /* Now compute what's actually available to move.  */
  add_noreturn_fake_exit_edges ();
  build_store_vectors ();

  /* Collect expressions which might trap.  */
  trapping_expr = sbitmap_alloc (num_stores);
  sbitmap_zero (trapping_expr);
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    if (may_trap_p (ptr->pattern))
      SET_BIT (trapping_expr, ptr->index);
  for (i = 0; i < n_basic_blocks; i++)
    {
      edge e;

      /* If the current block is the destination of an abnormal edge, we
	 kill all trapping expressions because we won't be able to properly
	 place the instruction on the edge.  So make them neither
	 anticipatable nor transparent.  This is fairly conservative.  */
      for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
	if (e->flags & EDGE_ABNORMAL)
	  {
	    sbitmap_difference (st_antloc[i], st_antloc[i], trapping_expr);
	    sbitmap_difference (transp[i], transp[i], trapping_expr);
	    break;
	  }
    }
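  /* Run lazy code motion over the reverse flowgraph; the result is a
     per-block map of stores whose original insns can be deleted and a
     per-edge map of points where the moved stores must be inserted.  */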
  edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
				st_antloc, ae_kill, &pre_insert_map,
				&pre_delete_map);
  /* Now we want to insert the new stores which are going to be needed.  */
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      for (x = 0; x < n_basic_blocks; x++)
	if (TEST_BIT (pre_delete_map[x], ptr->index))
	  delete_store (ptr, BASIC_BLOCK (x));

      for (x = 0; x < NUM_EDGES (edge_list); x++)
	if (TEST_BIT (pre_insert_map[x], ptr->index))
	  update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
    }
  if (update_flow)
    commit_edge_insertions ();

  sbitmap_free (trapping_expr);
  free_store_memory ();
  free_edge_list (edge_list);
  remove_fake_edges ();
  end_alias_analysis ();
  df_finish (df_analyzer);
}