/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
   - ability to realloc sbitmap vectors would allow one initial computation
     of reg_set_in_block with only subsequent additions, rather than
     recomputing it for each pass
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   F. Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "config.h"
#include "system.h"
#include "toplev.h"

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "expr.h"
#include "ggc.h"
#include "params.h"

#include "obstack.h"
#define obstack_chunk_alloc gmalloc
#define obstack_chunk_free free
/* Propagate flow information through back edges and thus enable PRE to
   move loop-invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */
#define FOLLOW_BACK_EDGES 1
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute basic block information.

   2) Compute table of places where registers are set.

   3) Perform copy/constant propagation.

   4) Perform global cse.

   5) Perform another pass of copy/constant propagation.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************
216 **********************
   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.

   **********************
236 **********************
238 The steps for PRE are:
240 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
242 2) Perform the data flow analysis for PRE.
244 3) Delete the redundant instructions
246 4) Insert the required copies [if any] that make the partially
247 redundant instructions fully redundant.
249 5) For other reaching expressions, insert an instruction to copy the value
250 to a newly created pseudo that will reach the redundant instruction.
252 The deletion is done first so that when we do insertions we
253 know which pseudo reg to use.
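
   As an illustration (a made-up source fragment, not from any particular
   program), consider:

        if (p)
          x = a + b;    <- computed in block B1
        else
          x = 0;        <- block B2 computes nothing
        y = a + b;      <- block B3: partially redundant

   Step 3 deletes the computation of a + b in B3 in favor of a new pseudo,
   step 4 inserts t = a + b at the end of B2 so the expression becomes
   fully redundant, and step 5 copies B1's result into the same pseudo so
   that it reaches B3, which becomes y = t.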
   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much because it computes the reaching defs of
   each register in each block and thus can try to use an existing register.
   **********************

   A fair bit of simplicity is gained by creating small functions for simple
   tasks, even when the function is only called in one place.  This may
   measurably slow things down [or may not] by creating more function call
   overhead than is necessary.  The source is laid out so that it's trivial
   to make the affected functions inline so that one can measure what speed
   up, if any, can be achieved, and maybe later when things settle, things
   can be rearranged.

   Help stamp out big monolithic functions!  */
/* GCSE global vars.  */

/* If non-null, a file to which debugging output is dumped.  */
static FILE *gcse_file;

/* Note whether or not we should run jump optimization after gcse.  We
   want to do this for two cases.

    * If we changed any jumps via cprop.

    * If we added any labels via edge splitting.  */

static int run_jump_opt_after_gcse;

/* Bitmaps are normally not included in debugging dumps.
   However it's useful to be able to print them from GDB.
   We could create special functions for this, but it's simpler to
   just allow passing stderr to the dump_foo fns.  Since stderr can
   be a macro, we store a copy here.  */
static FILE *debug_stderr;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Non-zero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy_p[(int) NUM_MACHINE_MODES];

/* Non-zero if can_copy_p has been initialized.  */
static int can_copy_init_p;

struct reg_use {rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Non-zero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Non-zero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

/* Total size of the expression hash table, in elements.  */
static unsigned int expr_hash_table_size;

/* The table itself.
   This is an array of `expr_hash_table_size' elements.  */
static struct expr **expr_hash_table;

/* Total size of the copy propagation hash table, in elements.  */
static unsigned int set_hash_table_size;

/* The table itself.
   This is an array of `set_hash_table_size' elements.  */
static struct expr **set_hash_table;
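
/* A hypothetical sketch (not code from this file) of how a bucket in
   either table is searched; the lookup and insertion routines below
   follow this pattern, using expr_equiv_p to resolve collisions.  */
#if 0
static struct expr *
sketch_lookup (table, hash, x)
     struct expr **table;
     unsigned int hash;
     rtx x;
{
  struct expr *e;

  for (e = table[hash]; e != NULL; e = e->next_same_hash)
    if (expr_equiv_p (e->expr, x))
      return e;
  return NULL;
}
#endif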
/* Mapping of uids to cuids.
   Only real insns get cuids.  */
static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */
#ifdef ENABLE_CHECKING
#define INSN_CUID(INSN) \
  (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif

/* Number of cuids.  */
static int max_cuid;

/* Mapping of cuids to insns.  */
static rtx *cuid_insn;

/* Get insn from cuid.  */
#define CUID_INSN(CUID) (cuid_insn[CUID])
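
/* Illustration (hypothetical insn stream, not from any program): given
   insn (uid 7), note (uid 8), insn (uid 12), we get uid_cuid[7] = 0 and
   uid_cuid[12] = 1, so INSN_CUID numbers the real insns densely and
   CUID_INSN (INSN_CUID (i)) == i for any real insn i.  */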
/* Maximum register number in function prior to doing gcse + 1.
   Registers created during this pass have regno >= max_gcse_regno.
   This is named with "gcse" to not collide with global of same name.  */
static unsigned int max_gcse_regno;

/* Maximum number of cse-able expressions found.  */
static int n_exprs;

/* Maximum number of assignments for copy propagation found.  */
static int n_sets;
/* Table of registers that are modified.

   For each register, each element is a list of places where the pseudo-reg
   is set.

   For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
   requires knowledge of which blocks kill which regs [and thus could use
   a bitmap instead of the lists `reg_set_table' uses].

   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
   advantage of recording things this way is that `reg_set_table' is fairly
   sparse with respect to pseudo regs but for hard regs could be fairly dense
   [relatively speaking].  And recording sets of pseudo-regs in lists speeds
   up functions like compute_transp since in the case of pseudo-regs we only
   need to iterate over the number of times a pseudo-reg is set, not over the
   number of basic blocks [clearly there is a bit of a slow down in the cases
   where a pseudo is set more than once in a block, however it is believed
   that the net effect is to speed things up].  This isn't done for hard-regs
   because recording call-clobbered hard-regs in `reg_set_table' at each
   function call can consume a fair bit of memory, and iterating over
   hard-regs stored this way in compute_transp will be more expensive.  */
typedef struct reg_set
{
  /* The next setting of this register.  */
  struct reg_set *next;
  /* The insn where it was set.  */
  rtx insn;
} reg_set;

static reg_set **reg_set_table;

/* Size of `reg_set_table'.
   The table starts out at max_gcse_regno + slop, and is enlarged as
   necessary.  */
static int reg_set_table_size;

/* Amount to grow `reg_set_table' by when it's full.  */
#define REG_SET_TABLE_SLOP 100
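
/* A hypothetical sketch (not code from this file) of consuming the
   table: record_one_set links new entries at the head, so the chain
   visits the setting insns of pseudo REGNO most recently recorded
   first.  Functions like compute_transp walk these chains.  */
#if 0
static int
sketch_count_sets (regno)
     int regno;
{
  struct reg_set *r;
  int n = 0;

  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    n++;  /* r->insn is one insn that sets pseudo REGNO.  */
  return n;
}
#endif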
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by
   anything except itself. (i.e., loads and stores to a single location).
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store Motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;           /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx loads;                    /* INSN list of loads seen.  */
  rtx stores;                   /* INSN list of stores seen.  */
  struct ls_expr * next;        /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  int hash_index;               /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;
/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* For each block, a bitmap of registers set in the block.
   This is used by expr_killed_p and compute_transp.
   It is computed during hash table computation and not by compute_sets
   as it includes registers added since the last pass (or between cprop and
   gcse) and it's currently not easy to realloc sbitmap vectors.  */
static sbitmap *reg_set_in_block;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static rtx * modify_mem_list;
bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;
bitmap canon_modify_mem_list_set;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of constants propagated.  */
static int const_prop_count;
/* Number of copies propagated.  */
static int copy_prop_count;
/* These variables are used by classic GCSE.
   Normally they'd be defined a bit later, but `rd_gen' needs to
   be declared sooner.  */

/* Each block has a bitmap of each type.
   The length of each block's bitmap is:

       max_cuid  - for reaching definitions
       n_exprs   - for available expressions

   Thus we view the bitmaps as 2 dimensional arrays.  i.e.
   rd_kill[block_num][cuid_num]
   ae_kill[block_num][expr_num]  */

/* For reaching defs */
static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;

/* for available exprs */
static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
/* Objects of this type are passed around by the null-pointer check
   removal routines.  */
struct null_pointer_info
{
  /* The basic block being processed.  */
  basic_block current_block;
  /* The first register to be handled in this pass.  */
  unsigned int min_reg;
  /* One greater than the last register to be handled in this pass.  */
  unsigned int max_reg;
  sbitmap *nonnull_local;
  sbitmap *nonnull_killed;
};
static void compute_can_copy PARAMS ((void));
static char *gmalloc PARAMS ((unsigned int));
static char *grealloc PARAMS ((char *, unsigned int));
static char *gcse_alloc PARAMS ((unsigned long));
static void alloc_gcse_mem PARAMS ((rtx));
static void free_gcse_mem PARAMS ((void));
static void alloc_reg_set_mem PARAMS ((int));
static void free_reg_set_mem PARAMS ((void));
static int get_bitmap_width PARAMS ((int, int, int));
static void record_one_set PARAMS ((int, rtx));
static void record_set_info PARAMS ((rtx, rtx, void *));
static void compute_sets PARAMS ((rtx));
static void hash_scan_insn PARAMS ((rtx, int, int));
static void hash_scan_set PARAMS ((rtx, rtx, int));
static void hash_scan_clobber PARAMS ((rtx, rtx));
static void hash_scan_call PARAMS ((rtx, rtx));
static int want_to_gcse_p PARAMS ((rtx));
static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
static int oprs_anticipatable_p PARAMS ((rtx, rtx));
static int oprs_available_p PARAMS ((rtx, rtx));
static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
                                          int, int));
static void insert_set_in_table PARAMS ((rtx, rtx));
static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
static unsigned int hash_string_1 PARAMS ((const char *));
static unsigned int hash_set PARAMS ((int, int));
static int expr_equiv_p PARAMS ((rtx, rtx));
static void record_last_reg_set_info PARAMS ((rtx, int));
static void record_last_mem_set_info PARAMS ((rtx));
static void record_last_set_info PARAMS ((rtx, rtx, void *));
static void compute_hash_table PARAMS ((int));
static void alloc_set_hash_table PARAMS ((int));
static void free_set_hash_table PARAMS ((void));
static void compute_set_hash_table PARAMS ((void));
static void alloc_expr_hash_table PARAMS ((unsigned int));
static void free_expr_hash_table PARAMS ((void));
static void compute_expr_hash_table PARAMS ((void));
static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
                                     int, int));
static struct expr *lookup_expr PARAMS ((rtx));
static struct expr *lookup_set PARAMS ((unsigned int, rtx));
static struct expr *next_set PARAMS ((unsigned int, struct expr *));
static void reset_opr_set_tables PARAMS ((void));
static int oprs_not_set_p PARAMS ((rtx, rtx));
static void mark_call PARAMS ((rtx));
static void mark_set PARAMS ((rtx, rtx));
static void mark_clobber PARAMS ((rtx, rtx));
static void mark_oprs_set PARAMS ((rtx));
static void alloc_cprop_mem PARAMS ((int, int));
static void free_cprop_mem PARAMS ((void));
static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
static void compute_transpout PARAMS ((void));
static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
                                              int));
static void compute_cprop_data PARAMS ((void));
static void find_used_regs PARAMS ((rtx *, void *));
static int try_replace_reg PARAMS ((rtx, rtx, rtx));
static struct expr *find_avail_set PARAMS ((int, rtx));
static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx));
#ifdef HAVE_cc0
static int cprop_cc0_jump PARAMS ((basic_block, rtx, struct reg_use *, rtx));
#endif
static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
static void canon_list_insert PARAMS ((rtx, rtx, void *));
static int cprop_insn PARAMS ((basic_block, rtx, int));
static int cprop PARAMS ((int));
static int one_cprop_pass PARAMS ((int, int));
static void alloc_pre_mem PARAMS ((int, int));
static void free_pre_mem PARAMS ((void));
static void compute_pre_data PARAMS ((void));
static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
                                            basic_block));
static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
static void pre_insert_copies PARAMS ((void));
static int pre_delete PARAMS ((void));
static int pre_gcse PARAMS ((void));
static int one_pre_gcse_pass PARAMS ((int));
static void add_label_notes PARAMS ((rtx, rtx));
static void alloc_code_hoist_mem PARAMS ((int, int));
static void free_code_hoist_mem PARAMS ((void));
static void compute_code_hoist_vbeinout PARAMS ((void));
static void compute_code_hoist_data PARAMS ((void));
static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
                                              char *));
static void hoist_code PARAMS ((void));
static int one_code_hoisting_pass PARAMS ((void));
static void alloc_rd_mem PARAMS ((int, int));
static void free_rd_mem PARAMS ((void));
static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
static void compute_kill_rd PARAMS ((void));
static void compute_rd PARAMS ((void));
static void alloc_avail_expr_mem PARAMS ((int, int));
static void free_avail_expr_mem PARAMS ((void));
static void compute_ae_gen PARAMS ((void));
static int expr_killed_p PARAMS ((rtx, basic_block));
static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
                                        basic_block, int));
static rtx computing_insn PARAMS ((struct expr *, rtx));
static int def_reaches_here_p PARAMS ((rtx, rtx));
static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
static int handle_avail_expr PARAMS ((rtx, struct expr *));
static int classic_gcse PARAMS ((void));
static int one_classic_gcse_pass PARAMS ((int));
static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
static void delete_null_pointer_checks_1 PARAMS ((unsigned int *,
                                                  sbitmap *, sbitmap *,
                                                  struct null_pointer_info *));
static rtx process_insert_insn PARAMS ((struct expr *));
static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
                                             basic_block, int, char *));
static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
                                                 basic_block, char *));
static struct ls_expr * ldst_entry PARAMS ((rtx));
static void free_ldst_entry PARAMS ((struct ls_expr *));
static void free_ldst_mems PARAMS ((void));
static void print_ldst_list PARAMS ((FILE *));
static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
static int enumerate_ldsts PARAMS ((void));
static inline struct ls_expr * first_ls_expr PARAMS ((void));
static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
static int simple_mem PARAMS ((rtx));
static void invalidate_any_buried_refs PARAMS ((rtx));
static void compute_ld_motion_mems PARAMS ((void));
static void trim_ld_motion_mems PARAMS ((void));
static void update_ld_motion_stores PARAMS ((struct expr *));
static void reg_set_info PARAMS ((rtx, rtx, void *));
static int store_ops_ok PARAMS ((rtx, basic_block));
static void find_moveable_store PARAMS ((rtx));
static int compute_store_table PARAMS ((void));
static int load_kills_store PARAMS ((rtx, rtx));
static int find_loads PARAMS ((rtx, rtx));
static int store_killed_in_insn PARAMS ((rtx, rtx));
static int store_killed_after PARAMS ((rtx, rtx, basic_block));
static int store_killed_before PARAMS ((rtx, rtx, basic_block));
static void build_store_vectors PARAMS ((void));
static void insert_insn_start_bb PARAMS ((rtx, basic_block));
static int insert_store PARAMS ((struct ls_expr *, edge));
static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
static void delete_store PARAMS ((struct ls_expr *,
                                  basic_block));
static void free_store_memory PARAMS ((void));
static void store_motion PARAMS ((void));
static void clear_modify_mem_tables PARAMS ((void));
static void free_modify_mem_tables PARAMS ((void));
/* Entry point for global common subexpression elimination.
   F is the first instruction in the function.  */

int
gcse_main (f, file)
     rtx f;
     FILE *file;
{
  int changed, pass;
  /* Bytes used at start of pass.  */
  int initial_bytes_used;
  /* Maximum number of bytes used by a pass.  */
  int max_pass_bytes;
  /* Point to release obstack data from for each pass.  */
  char *gcse_obstack_bottom;

  /* Insertion of instructions on edges can create new basic blocks; we
     need the original basic block count so that we can properly deallocate
     arrays sized on the number of basic blocks originally in the cfg.  */
  int orig_bb_count;

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* Assume that we do not need to run jump optimizations after gcse.  */
  run_jump_opt_after_gcse = 0;

  /* For calling dump_foo fns from gdb.  */
  debug_stderr = stderr;
  gcse_file = file;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  if (file)
    dump_flow_info (file);

  orig_bb_count = n_basic_blocks;
  /* Return if there's nothing to do.  */
  if (n_basic_blocks <= 1)
    return 0;

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many edges
     as blocks.  But we do not want to punish small functions which have
     a couple switch statements.  So we require a relatively large number
     of basic blocks and the ratio of edges to blocks to be high.  */
  if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
    {
      if (warn_disabled_optimization)
        warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
                 n_basic_blocks, n_edges / n_basic_blocks);
      return 0;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_gcse_regno)
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      if (warn_disabled_optimization)
        warning ("GCSE disabled: %d basic blocks and %d registers",
                 n_basic_blocks, max_gcse_regno);

      return 0;
    }

  /* See what modes support reg/reg copy operations.  */
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = 1;
    }

  gcc_obstack_init (&gcse_obstack);
  bytes_used = 0;

  /* We need alias.  */
  init_alias_analysis ();
  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);
  compute_sets (f);

  pass = 0;
  initial_bytes_used = bytes_used;
  max_pass_bytes = 0;
  gcse_obstack_bottom = gcse_alloc (1);
  changed = 1;
  while (changed && pass < MAX_GCSE_PASSES)
    {
      changed = 0;
      if (file)
        fprintf (file, "GCSE pass %d\n\n", pass + 1);

      /* Initialize bytes_used to the space for the pred/succ lists,
         and the reg_set_table data.  */
      bytes_used = initial_bytes_used;

      /* Each pass may create new registers, so recalculate each time.  */
      max_gcse_regno = max_reg_num ();

      alloc_gcse_mem (f);

      /* Don't allow constant propagation to modify jumps
         during this pass.  */
      changed = one_cprop_pass (pass + 1, 0);

      if (optimize_size)
        changed |= one_classic_gcse_pass (pass + 1);
      else
        {
          changed |= one_pre_gcse_pass (pass + 1);
          /* We may have just created new basic blocks.  Release and
             recompute various things which are sized on the number of
             basic blocks.  */
          if (changed)
            {
              free_modify_mem_tables ();
              modify_mem_list
                = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
              canon_modify_mem_list
                = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
              memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
              memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
              orig_bb_count = n_basic_blocks;
            }
          free_reg_set_mem ();
          alloc_reg_set_mem (max_reg_num ());
          compute_sets (f);
          run_jump_opt_after_gcse = 1;
        }

      if (max_pass_bytes < bytes_used)
        max_pass_bytes = bytes_used;

      /* Free up memory, then reallocate for code hoisting.  We can
         not re-use the existing allocated memory because the tables
         will not have info for the insns or registers created by
         partial redundancy elimination.  */
      free_gcse_mem ();

      /* It does not make sense to run code hoisting unless we are optimizing
         for code size -- it rarely makes programs faster, and can make
         them bigger if we did partial redundancy elimination (when optimizing
         for space, we use a classic gcse algorithm instead of partial
         redundancy algorithms).  */
      if (optimize_size)
        {
          max_gcse_regno = max_reg_num ();
          alloc_gcse_mem (f);
          changed |= one_code_hoisting_pass ();
          free_gcse_mem ();

          if (max_pass_bytes < bytes_used)
            max_pass_bytes = bytes_used;
        }

      if (file)
        {
          fprintf (file, "\n");
          fflush (file);
        }

      obstack_free (&gcse_obstack, gcse_obstack_bottom);
      pass++;
    }

  /* Do one last pass of copy propagation, including cprop into
     conditional jumps.  */

  max_gcse_regno = max_reg_num ();
  alloc_gcse_mem (f);

  /* This time, go ahead and allow cprop to alter jumps.  */
  one_cprop_pass (pass + 1, 1);
  free_gcse_mem ();

  if (file)
    {
      fprintf (file, "GCSE of %s: %d basic blocks, ",
               current_function_name, n_basic_blocks);
      fprintf (file, "%d pass%s, %d bytes\n\n",
               pass, pass > 1 ? "es" : "", max_pass_bytes);
    }

  obstack_free (&gcse_obstack, NULL);
  free_reg_set_mem ();

  /* We are finished with alias.  */
  end_alias_analysis ();
  allocate_reg_info (max_reg_num (), FALSE, FALSE);

  if (!optimize_size && flag_gcse_sm)
    store_motion ();

  /* Record where pseudo-registers are set.  */
  return run_jump_opt_after_gcse;
}
/* Misc. utilities.  */

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy ()
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy_p, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy_p[i] = 0;
#else
        reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy_p[i] = 1;
#endif
      }
    else
      can_copy_p[i] = 1;

  end_sequence ();
}
/* Cover function to xmalloc to record bytes allocated.  */

static char *
gmalloc (size)
     unsigned int size;
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xrealloc.
   We don't record the additional size since we don't know it.
   It won't affect memory usage stats much anyway.  */

static char *
grealloc (ptr, size)
     char *ptr;
     unsigned int size;
{
  return xrealloc (ptr, size);
}

/* Cover function to obstack_alloc.
   We don't need to record the bytes allocated here since
   obstack_chunk_alloc is set to gmalloc.  */

static char *
gcse_alloc (size)
     unsigned long size;
{
  return (char *) obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the cuid mapping array,
   and reg/memory set tracking tables.

   This is called at the start of each pass.  */

static void
alloc_gcse_mem (f)
     rtx f;
{
  int i, n;
  rtx insn;

  /* Find the largest UID and create a mapping from UIDs to CUIDs.
     CUIDs are like UIDs except they increase monotonically, have no gaps,
     and only apply to real insns.  */

  max_uid = get_max_uid ();
  n = (max_uid + 1) * sizeof (int);
  uid_cuid = (int *) gmalloc (n);
  memset ((char *) uid_cuid, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        uid_cuid[INSN_UID (insn)] = i++;
      else
        uid_cuid[INSN_UID (insn)] = i;
    }

  /* Create a table mapping cuids to insns.  */

  max_cuid = i;
  n = (max_cuid + 1) * sizeof (rtx);
  cuid_insn = (rtx *) gmalloc (n);
  memset ((char *) cuid_insn, 0, n);
  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      CUID_INSN (i++) = insn;

  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_XMALLOC ();

  /* Allocate vars to track sets of regs, memory per block.  */
  reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
                                                       max_gcse_regno);
  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
  canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
  memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
  memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
  modify_mem_list_set = BITMAP_XMALLOC ();
  canon_modify_mem_list_set = BITMAP_XMALLOC ();
}
/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem ()
{
  free (uid_cuid);
  free (cuid_insn);

  BITMAP_XFREE (reg_set_bitmap);

  sbitmap_vector_free (reg_set_in_block);
  free_modify_mem_tables ();
  BITMAP_XFREE (modify_mem_list_set);
  BITMAP_XFREE (canon_modify_mem_list_set);
}
/* Many of the global optimization algorithms work by solving dataflow
   equations for various expressions.  Initially, some local value is
   computed for each expression in each block.  Then, the values across the
   various blocks are combined (by following flow graph edges) to arrive at
   global values.  Conceptually, each set of equations is independent.  We
   may therefore solve all the equations in parallel, solve them one at a
   time, or pick any intermediate approach.

   When you're going to need N two-dimensional bitmaps, each X (say, the
   number of blocks) by Y (say, the number of expressions), call this
   function.  It's not important what X and Y represent; only that Y
   correspond to the things that can be done in parallel.  This function will
   return an appropriate chunking factor C; you should solve C sets of
   equations in parallel.  By going through this function, we can easily
   trade space against time; by solving fewer equations in parallel we use
   less space.  */

static int
get_bitmap_width (n, x, y)
     int n;
     int x;
     int y;
{
  /* It's not really worth figuring out *exactly* how much memory will
     be used by a particular choice.  The important thing is to get
     something approximately right.  */
  size_t max_bitmap_memory = 10 * 1024 * 1024;

  /* The number of bytes we'd use for a single column of minimum
     width.  */
  size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);

  /* Often, it's reasonable just to solve all the equations in
     parallel.  */
  if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
    return y;

  /* Otherwise, pick the largest width we can, without going over the
     limit.  */
  return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
                             / column_size);
}
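
/* A worked example with made-up numbers: for N = 2 bitmaps over X = 1000
   blocks with an 8-byte SBITMAP_ELT_TYPE, column_size is 16000 bytes.
   With Y = 100000 expressions, the full allocation would be roughly
   16000 * SBITMAP_SET_SIZE (100000) bytes, about 25 MB, which exceeds
   the 10 MB cap; so instead of returning Y we return about
   64 * (10485760 / 16000), i.e. chunks of roughly 42000 equations.  */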
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   SETP controls which hash table to look at.  If zero, this routine looks at
   the expr hash table; if nonzero this routine looks at the set hash table.
   Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (transp, comp, antloc, setp)
     sbitmap *transp;
     sbitmap *comp;
     sbitmap *antloc;
     int setp;
{
  unsigned int i, hash_table_size;
  struct expr **hash_table;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (setp)
        sbitmap_vector_zero (transp, n_basic_blocks);
      else
        sbitmap_vector_ones (transp, n_basic_blocks);
    }

  if (comp)
    sbitmap_vector_zero (comp, n_basic_blocks);
  if (antloc)
    sbitmap_vector_zero (antloc, n_basic_blocks);

  /* We use the same code for cprop, pre and hoisting.  For cprop
     we care about the set hash table, for pre and hoisting we
     care about the expr hash table.  */
  hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
  hash_table = setp ? set_hash_table : expr_hash_table;

  for (i = 0; i < hash_table_size; i++)
    {
      struct expr *expr;

      for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp, setp);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to non-zero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to non-zero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}
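
/* A hypothetical usage sketch (the real callers live elsewhere in this
   file): PRE and hoisting pass bitmaps sized for the expr hash table
   with SETP == 0, while cprop passes its own with SETP != 0 and gets
   TRANSP back complemented, e.g.

     compute_local_properties (transp, comp, antloc, 0);
*/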
/* Register set information.

   `reg_set_table' records where each register is set or otherwise
   modified.  */

static struct obstack reg_set_obstack;

static void
alloc_reg_set_mem (n_regs)
     int n_regs;
{
  int n;

  reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
  n = reg_set_table_size * sizeof (struct reg_set *);
  reg_set_table = (struct reg_set **) gmalloc (n);
  memset ((char *) reg_set_table, 0, n);

  gcc_obstack_init (&reg_set_obstack);
}

static void
free_reg_set_mem ()
{
  free (reg_set_table);
  obstack_free (&reg_set_obstack, NULL);
}
/* Record REGNO in the reg_set table.  */

static void
record_one_set (regno, insn)
     int regno;
     rtx insn;
{
  /* Allocate a new reg_set element and link it onto the list.  */
  struct reg_set *new_reg_info;

  /* If the table isn't big enough, enlarge it.  */
  if (regno >= reg_set_table_size)
    {
      int new_size = regno + REG_SET_TABLE_SLOP;

      reg_set_table
        = (struct reg_set **) grealloc ((char *) reg_set_table,
                                        new_size * sizeof (struct reg_set *));
      memset ((char *) (reg_set_table + reg_set_table_size), 0,
              (new_size - reg_set_table_size) * sizeof (struct reg_set *));
      reg_set_table_size = new_size;
    }

  new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
                                                   sizeof (struct reg_set));
  bytes_used += sizeof (struct reg_set);
  new_reg_info->insn = insn;
  new_reg_info->next = reg_set_table[regno];
  reg_set_table[regno] = new_reg_info;
}
/* Called from compute_sets via note_stores to handle one SET or CLOBBER in
   an insn.  The DATA is really the instruction in which the SET is
   occurring.  */

static void
record_set_info (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data;
{
  rtx record_set_insn = (rtx) data;

  if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
    record_one_set (REGNO (dest), record_set_insn);
}

/* Scan the function and record each set of each pseudo-register.

   This is called once, at the start of the gcse pass.  See the comments for
   `reg_set_table' for further documentation.  */

static void
compute_sets (f)
     rtx f;
{
  rtx insn;

  for (insn = f; insn != 0; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      note_stores (PATTERN (insn), record_set_info, insn);
}
/* Hash table support.  */

/* For each register, the cuid of the first/last insn in the block
   that set it, or -1 if not set.  */
#define NEVER_SET -1

struct reg_avail_info
{
  int last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static int current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (x)
     rtx x;
{
  static rtx test_insn = 0;
  int num_clobbers = 0;
  int icode;

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      break;
    }

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
      ggc_add_rtx_root (&test_insn, 1);
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
          && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}
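
/* Hypothetical example: for an insn (set (reg 105) (plus (reg 100)
   (reg 101))) the caller passes the SET_SRC, the PLUS, to want_to_gcse_p;
   bare registers and constants are rejected up front since there is
   nothing to be gained by GCSE'ing them.  */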
/* Return non-zero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (x, insn, avail_p)
     rtx x, insn;
     int avail_p;
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < INSN_CUID (insn);
        else
          return info->first_set >= INSN_CUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
                                  x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}
/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  A memory reference for a load instruction,
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (dest, setter, data)
     rtx dest, setter ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;
{
  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == SIGN_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (GET_CODE (dest) != MEM)
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */

  if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
        gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
                       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the CUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (bb, uid_limit, x, avail_p)
     basic_block bb;
     int uid_limit;
     rtx x;
     int avail_p;
{
  rtx list_entry = modify_mem_list[bb->index];
  rtx setter;

  while (list_entry)
    {
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
          || (! avail_p
              && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
        {
          list_entry = XEXP (list_entry, 1);
          continue;
        }

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (GET_CODE (setter) == CALL_INSN)
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.

         The note_stores interface is pretty limited, so we have to
         communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
        return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}
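
/* Hypothetical example: for a load X = (mem (reg 100)) at CUID C in
   block BB, availability asks load_killed_in_block_p (bb, C, x, 1),
   i.e. is the MEM clobbered between the load and the end of BB, while
   anticipatability asks with AVAIL_P == 0 about the window from the
   start of BB up to C.  */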
/* Return non-zero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return non-zero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (x, insn)
     rtx x, insn;
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.

   ??? One might want to merge this with canon_hash.  Later.  */

static unsigned int
hash_expr (x, mode, do_not_record_p, hash_table_size)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
     int hash_table_size;
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_expr_1 (x, mode, do_not_record_p);
  return hash % hash_table_size;
}
/* Hash a string.  Just add its bytes up.  */

static inline unsigned
hash_string_1 (ps)
     const char *ps;
{
  unsigned hash = 0;
  const unsigned char *p = (const unsigned char *) ps;

  if (p)
    while (*p)
      hash += *p++;

  return hash;
}
/* Subroutine of hash_expr to do the actual work.  */

static unsigned int
hash_expr_1 (x, mode, do_not_record_p)
     rtx x;
     enum machine_mode mode;
     int *do_not_record_p;
{
  int i, j;
  unsigned hash = 0;
  enum rtx_code code;
  const char *fmt;

  /* Used to turn recursion into iteration.  We can't rely on GCC's
     tail-recursion elimination since we need to keep accumulating values
     in HASH.  */
 repeat:
  if (x == 0)
    return hash;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      hash += ((unsigned int) REG << 7) + REGNO (x);
      return hash;

    case CONST_INT:
      hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
               + (unsigned int) INTVAL (x));
      return hash;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
         the integers representing the constant.  */
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
        for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
          hash += (unsigned int) XWINT (x, i);
      else
        hash += ((unsigned int) CONST_DOUBLE_LOW (x)
                 + (unsigned int) CONST_DOUBLE_HIGH (x));
      return hash;

    case CONST_VECTOR:
      {
        int units;
        rtx elt;

        units = CONST_VECTOR_NUNITS (x);

        for (i = 0; i < units; ++i)
          {
            elt = CONST_VECTOR_ELT (x, i);
            hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
          }

        return hash;
      }

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
         differences and differences between each stage's debugging dumps.  */
      hash += (((unsigned int) LABEL_REF << 7)
               + CODE_LABEL_NUMBER (XEXP (x, 0)));
      return hash;

    case SYMBOL_REF:
      {
        /* Don't hash on the symbol's address to avoid bootstrap differences.
           Different hash values may cause expressions to be recorded in
           different orders and thus different registers to be used in the
           final assembler.  This also avoids differences in the dump files
           between various stages.  */
        unsigned int h = 0;
        const unsigned char *p = (const unsigned char *) XSTR (x, 0);

        while (*p)
          h += (h << 7) + *p++; /* ??? revisit */

        hash += ((unsigned int) SYMBOL_REF << 7) + h;
        return hash;
      }

    case MEM:
      if (MEM_VOLATILE_P (x))
        {
          *do_not_record_p = 1;
          return 0;
        }

      hash += (unsigned int) MEM;
      hash += MEM_ALIAS_SET (x);
      x = XEXP (x, 0);
      goto repeat;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      *do_not_record_p = 1;
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        {
          *do_not_record_p = 1;
          return 0;
        }
      else
        {
          /* We don't want to take the filename and line into account.  */
          hash += (unsigned) code + (unsigned) GET_MODE (x)
            + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
            + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
            + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);

          if (ASM_OPERANDS_INPUT_LENGTH (x))
            {
              for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
                {
                  hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
                                        GET_MODE (ASM_OPERANDS_INPUT (x, i)),
                                        do_not_record_p)
                           + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
                                            (x, i)));
                }

              hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
              x = ASM_OPERANDS_INPUT (x, 0);
              mode = GET_MODE (x);
              goto repeat;
            }
          return hash;
        }

    default:
      break;
    }

  hash += (unsigned) code + (unsigned) GET_MODE (x);
  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, i);
              goto repeat;
            }

          hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
          if (*do_not_record_p)
            return 0;
        }

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          {
            hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
            if (*do_not_record_p)
              return 0;
          }

      else if (fmt[i] == 's')
        hash += hash_string_1 (XSTR (x, i));
      else if (fmt[i] == 'i')
        hash += (unsigned int) XINT (x, i);
      else
        abort ();
    }

  return hash;
}
/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (regno, hash_table_size)
     int regno;
     int hash_table_size;
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}
/* Return non-zero if exp1 is equivalent to exp2.
   ??? Borrowed from cse.c.  Might want to remerge with cse.c.  Later.  */

static int
expr_equiv_p (x, y)
     rtx x, y;
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return 1;

  if (x == 0 || y == 0)
    return x == y;

  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  switch (code)
    {
    case PC:
    case CC0:
      return x == y;

    case CONST_INT:
      return INTVAL (x) == INTVAL (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case REG:
      return REGNO (x) == REGNO (y);

    case MEM:
      /* Can't merge two expressions in different alias sets, since we can
         decide that the expression is transparent in a block when it isn't,
         due to it being set with the different alias set.  */
      if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
        return 0;
      break;

    /* For commutative operations, check both orders.  */
    case PLUS:
    case MULT:
    case AND:
    case IOR:
    case XOR:
    case NE:
    case EQ:
      return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
               && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
              || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
                  && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
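
      /* For example (a hypothetical pair): (plus:SI (reg 100) (reg 101))
         is equivalent to (plus:SI (reg 101) (reg 100)), but the same pair
         under MINUS would not be, since MINUS falls through to the
         element-wise comparison below where operand order matters.  */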
    case ASM_OPERANDS:
      /* We don't use the generic code below because we want to
         disregard filename and line numbers.  */

      /* A volatile asm isn't equivalent to any other.  */
      if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
        return 0;

      if (GET_MODE (x) != GET_MODE (y)
          || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
          || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
                     ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
          || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
          || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
        return 0;

      if (ASM_OPERANDS_INPUT_LENGTH (x))
        {
          for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
            if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
                                ASM_OPERANDS_INPUT (y, i))
                || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
                           ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
              return 0;
        }

      return 1;

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'e':
          if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
            return 0;
          break;

        case 'E':
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
              return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case '0':
          break;

        default:
          abort ();
        }
    }

  return 1;
}
1941 /* Insert expression X in INSN in the hash table.
1942 If it is already present, record it as the last occurrence in INSN's
1945 MODE is the mode of the value X is being stored into.
1946 It is only used if X is a CONST_INT.
1948 ANTIC_P is non-zero if X is an anticipatable expression.
1949 AVAIL_P is non-zero if X is an available expression. */
1952 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1954 enum machine_mode mode;
1956 int antic_p, avail_p;
1958 int found, do_not_record_p;
1960 struct expr *cur_expr, *last_expr = NULL;
1961 struct occr *antic_occr, *avail_occr;
1962 struct occr *last_occr = NULL;
1964 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1966 /* Do not insert expression in table if it contains volatile operands,
1967 or if hash_expr determines the expression is something we don't want
1968 to or can't handle. */
1969 if (do_not_record_p)
1972 cur_expr = expr_hash_table[hash];
1975 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1977 /* If the expression isn't found, save a pointer to the end of
1979 last_expr = cur_expr;
1980 cur_expr = cur_expr->next_same_hash;
1985 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1986 bytes_used += sizeof (struct expr);
1987 if (expr_hash_table[hash] == NULL)
1988 /* This is the first pattern that hashed to this index. */
1989 expr_hash_table[hash] = cur_expr;
1991 /* Add EXPR to end of this hash chain. */
1992 last_expr->next_same_hash = cur_expr;
1994 /* Set the fields of the expr element. */
1996 cur_expr->bitmap_index = n_exprs++;
1997 cur_expr->next_same_hash = NULL;
1998 cur_expr->antic_occr = NULL;
1999 cur_expr->avail_occr = NULL;
2002 /* Now record the occurrence(s). */
2005 antic_occr = cur_expr->antic_occr;
2007 /* Search for another occurrence in the same basic block. */
2008 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
2010 /* If an occurrence isn't found, save a pointer to the end of
2012 last_occr = antic_occr;
2013 antic_occr = antic_occr->next;
2017 /* Found another instance of the expression in the same basic block.
2018 Prefer the currently recorded one. We want the first one in the
2019 block and the block is scanned from start to end. */
2020 ; /* nothing to do */
2023 /* First occurrence of this expression in this basic block. */
2024 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2025 bytes_used += sizeof (struct occr);
2026 /* First occurrence of this expression in any block? */
2027 if (cur_expr->antic_occr == NULL)
2028 cur_expr->antic_occr = antic_occr;
2030 last_occr->next = antic_occr;
2032 antic_occr->insn = insn;
2033 antic_occr->next = NULL;
2039 avail_occr = cur_expr->avail_occr;
2041 /* Search for another occurrence in the same basic block. */
2042 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2044 /* If an occurrence isn't found, save a pointer to the end of
2045 the list. */
2046 last_occr = avail_occr;
2047 avail_occr = avail_occr->next;
2051 /* Found another instance of the expression in the same basic block.
2052 Prefer this occurrence to the currently recorded one. We want
2053 the last one in the block and the block is scanned from start
2054 to end. */
2055 avail_occr->insn = insn;
2058 /* First occurrence of this expression in this basic block. */
2059 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2060 bytes_used += sizeof (struct occr);
2062 /* First occurrence of this expression in any block? */
2063 if (cur_expr->avail_occr == NULL)
2064 cur_expr->avail_occr = avail_occr;
2066 last_occr->next = avail_occr;
2068 avail_occr->insn = insn;
2069 avail_occr->next = NULL;
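/* Illustrative sketch (editor's annotation, not original GCC code) of
   what the code above builds, assuming two expressions A and B collide
   in one bucket:

     expr_hash_table[hash] -> A -> B -> NULL      (next_same_hash links)

   where each struct expr carries a unique bitmap_index plus two
   occurrence lists.  Per basic block, antic_occr keeps only the first
   occurrence (the block is scanned front to back, so the first one found
   is kept) and avail_occr keeps only the last (each later occurrence
   overwrites occr->insn). */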
2074 /* Insert pattern X in INSN in the hash table.
2075 X is a SET of a reg to either another reg or a constant.
2076 If it is already present, record it as the last occurrence in INSN's
2077 basic block. */
2080 insert_set_in_table (x, insn)
2086 struct expr *cur_expr, *last_expr = NULL;
2087 struct occr *cur_occr, *last_occr = NULL;
2089 if (GET_CODE (x) != SET
2090 || GET_CODE (SET_DEST (x)) != REG)
2093 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2095 cur_expr = set_hash_table[hash];
2098 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2100 /* If the expression isn't found, save a pointer to the end of
2101 the list. */
2102 last_expr = cur_expr;
2103 cur_expr = cur_expr->next_same_hash;
2108 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2109 bytes_used += sizeof (struct expr);
2110 if (set_hash_table[hash] == NULL)
2111 /* This is the first pattern that hashed to this index. */
2112 set_hash_table[hash] = cur_expr;
2114 /* Add EXPR to end of this hash chain. */
2115 last_expr->next_same_hash = cur_expr;
2117 /* Set the fields of the expr element.
2118 We must copy X because it can be modified when copy propagation is
2119 performed on its operands. */
2120 cur_expr->expr = copy_rtx (x);
2121 cur_expr->bitmap_index = n_sets++;
2122 cur_expr->next_same_hash = NULL;
2123 cur_expr->antic_occr = NULL;
2124 cur_expr->avail_occr = NULL;
2127 /* Now record the occurrence. */
2128 cur_occr = cur_expr->avail_occr;
2130 /* Search for another occurrence in the same basic block. */
2131 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2133 /* If an occurrence isn't found, save a pointer to the end of
2134 the list. */
2135 last_occr = cur_occr;
2136 cur_occr = cur_occr->next;
2140 /* Found another instance of the expression in the same basic block.
2141 Prefer this occurrence to the currently recorded one. We want the
2142 last one in the block and the block is scanned from start to end. */
2143 cur_occr->insn = insn;
2146 /* First occurrence of this expression in this basic block. */
2147 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2148 bytes_used += sizeof (struct occr);
2150 /* First occurrence of this expression in any block? */
2151 if (cur_expr->avail_occr == NULL)
2152 cur_expr->avail_occr = cur_occr;
2154 last_occr->next = cur_occr;
2156 cur_occr->insn = insn;
2157 cur_occr->next = NULL;
2161 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
2162 non-zero, this is for the assignment hash table, otherwise it is for the
2163 expression hash table. */
2166 hash_scan_set (pat, insn, set_p)
2170 rtx src = SET_SRC (pat);
2171 rtx dest = SET_DEST (pat);
2174 if (GET_CODE (src) == CALL)
2175 hash_scan_call (src, insn);
2177 else if (GET_CODE (dest) == REG)
2179 unsigned int regno = REGNO (dest);
2182 /* If this is a single set and we are doing constant propagation,
2183 see if a REG_NOTE shows this equivalent to a constant. */
2184 if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2185 && CONSTANT_P (XEXP (note, 0)))
2186 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2188 /* Only record sets of pseudo-regs in the hash table. */
2190 && regno >= FIRST_PSEUDO_REGISTER
2191 /* Don't GCSE something if we can't do a reg/reg copy. */
2192 && can_copy_p [GET_MODE (dest)]
2193 /* GCSE commonly inserts instructions after the insn. We can't
2194 do that easily for EH_REGION notes so disable GCSE on these
2195 for now. */
2196 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2197 /* Is SET_SRC something we want to gcse? */
2198 && want_to_gcse_p (src)
2199 /* Don't CSE a nop. */
2200 && ! set_noop_p (pat)
2201 /* Don't GCSE if it has an attached REG_EQUIV note.
2202 At this point only function parameters should have
2203 REG_EQUIV notes, and if the argument slot is used somewhere
2204 explicitly it means the address of the parameter has been
2205 taken, so we should not extend the lifetime of the pseudo. */
2206 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2207 || GET_CODE (XEXP (note, 0)) != MEM))
2209 /* An expression is not anticipatable if its operands are
2210 modified before this insn or if this is not the only SET in
2211 this insn. */
2212 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2213 /* An expression is not available if its operands are
2214 subsequently modified, including this insn. It's also not
2215 available if this is a branch, because we can't insert
2216 a set after the branch. */
2217 int avail_p = (oprs_available_p (src, insn)
2218 && ! JUMP_P (insn));
2220 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2223 /* Record sets for constant/copy propagation. */
2225 && regno >= FIRST_PSEUDO_REGISTER
2226 && ((GET_CODE (src) == REG
2227 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2228 && can_copy_p [GET_MODE (dest)]
2229 && REGNO (src) != regno)
2230 || CONSTANT_P (src))
2231 /* A copy is not available if its src or dest is subsequently
2232 modified. Here we want to search from INSN+1 on, but
2233 oprs_available_p searches from INSN on. */
2234 && (insn == BLOCK_END (BLOCK_NUM (insn))
2235 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2236 && oprs_available_p (pat, tmp))))
2237 insert_set_in_table (pat, insn);
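/* Illustrative examples (editor's annotation, not original GCC code;
   register numbers are hypothetical) of how the tests above route SETs:

     (set (reg 100) (plus (reg 101) (const_int 4)))
         -> expression hash table (a source we want to GCSE)
     (set (reg 100) (reg 101))  or  (set (reg 100) (const_int 7))
         -> assignment hash table, for const/copy propagation
     (set (reg 2) ...) where 2 < FIRST_PSEUDO_REGISTER
         -> neither table; only sets of pseudo-regs are recorded. */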
2242 hash_scan_clobber (x, insn)
2243 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2245 /* Currently nothing to do. */
2249 hash_scan_call (x, insn)
2250 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2252 /* Currently nothing to do. */
2255 /* Process INSN and add hash table entries as appropriate.
2257 Only available expressions that set a single pseudo-reg are recorded.
2259 Single sets in a PARALLEL could be handled, but it's an extra complication
2260 that isn't dealt with right now. The trick is handling the CLOBBERs that
2261 are also in the PARALLEL. Later.
2263 If SET_P is non-zero, this is for the assignment hash table,
2264 otherwise it is for the expression hash table.
2265 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block, and should
2266 not record any expressions. */
2269 hash_scan_insn (insn, set_p, in_libcall_block)
2272 int in_libcall_block;
2274 rtx pat = PATTERN (insn);
2277 if (in_libcall_block)
2280 /* Pick out the sets of INSN and for other forms of instructions record
2281 what's been modified. */
2283 if (GET_CODE (pat) == SET)
2284 hash_scan_set (pat, insn, set_p);
2285 else if (GET_CODE (pat) == PARALLEL)
2286 for (i = 0; i < XVECLEN (pat, 0); i++)
2288 rtx x = XVECEXP (pat, 0, i);
2290 if (GET_CODE (x) == SET)
2291 hash_scan_set (x, insn, set_p);
2292 else if (GET_CODE (x) == CLOBBER)
2293 hash_scan_clobber (x, insn);
2294 else if (GET_CODE (x) == CALL)
2295 hash_scan_call (x, insn);
2298 else if (GET_CODE (pat) == CLOBBER)
2299 hash_scan_clobber (pat, insn);
2300 else if (GET_CODE (pat) == CALL)
2301 hash_scan_call (pat, insn);
2305 dump_hash_table (file, name, table, table_size, total_size)
2308 struct expr **table;
2309 int table_size, total_size;
2312 /* Flattened out table, so it's printed in proper order. */
2313 struct expr **flat_table;
2314 unsigned int *hash_val;
2318 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2319 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2321 for (i = 0; i < table_size; i++)
2322 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2324 flat_table[expr->bitmap_index] = expr;
2325 hash_val[expr->bitmap_index] = i;
2328 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2329 name, table_size, total_size);
2331 for (i = 0; i < total_size; i++)
2332 if (flat_table[i] != 0)
2334 expr = flat_table[i];
2335 fprintf (file, "Index %d (hash value %d)\n ",
2336 expr->bitmap_index, hash_val[i]);
2337 print_rtl (file, expr->expr);
2338 fprintf (file, "\n");
2341 fprintf (file, "\n");
2347 /* Record register first/last/block set information for REGNO in INSN.
2349 first_set records the first place in the block where the register
2350 is set and is used to compute "anticipatability".
2352 last_set records the last place in the block where the register
2353 is set and is used to compute "availability".
2355 last_bb records the block for which first_set and last_set are
2356 valid, as a quick test to invalidate them.
2358 reg_set_in_block records whether the register is set in the block
2359 and is used to compute "transparency". */
2362 record_last_reg_set_info (insn, regno)
2366 struct reg_avail_info *info = &reg_avail_info[regno];
2367 int cuid = INSN_CUID (insn);
2369 info->last_set = cuid;
2370 if (info->last_bb != current_bb)
2372 info->last_bb = current_bb;
2373 info->first_set = cuid;
2374 SET_BIT (reg_set_in_block[current_bb], regno);
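/* Editor's annotation (not original GCC code): with the bookkeeping
   above, "is the operand unchanged" queries reduce to CUID compares,
   roughly:

     not set before INSN (anticipatable):  info->first_set >= INSN_CUID (insn)
     not set at/after INSN (available):    info->last_set  <  INSN_CUID (insn)

   both valid only while info->last_bb == current_bb; otherwise the
   register has not been set in this block at all.  The authoritative
   comparisons live in the oprs_* routines earlier in this file. */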
2379 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2380 Note we store a pair of elements in the list, so they have to be
2381 taken off pairwise. */
2384 canon_list_insert (dest, unused1, v_insn)
2385 rtx dest ATTRIBUTE_UNUSED;
2386 rtx unused1 ATTRIBUTE_UNUSED;
2389 rtx dest_addr, insn;
2391 while (GET_CODE (dest) == SUBREG
2392 || GET_CODE (dest) == ZERO_EXTRACT
2393 || GET_CODE (dest) == SIGN_EXTRACT
2394 || GET_CODE (dest) == STRICT_LOW_PART)
2395 dest = XEXP (dest, 0);
2397 /* If DEST is not a MEM, then it will not conflict with a load. Note
2398 that function calls are assumed to clobber memory, but are handled
2399 elsewhere. */
2401 if (GET_CODE (dest) != MEM)
2404 dest_addr = get_addr (XEXP (dest, 0));
2405 dest_addr = canon_rtx (dest_addr);
2406 insn = (rtx) v_insn;
2408 canon_modify_mem_list[BLOCK_NUM (insn)] =
2409 alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
2410 canon_modify_mem_list[BLOCK_NUM (insn)] =
2411 alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
2412 bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
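/* Illustrative consumer sketch (editor's annotation, not original GCC
   code): because DEST is pushed after DEST_ADDR, the list head is the
   pattern and the next node is its canonical address, so readers take
   nodes off in pairs, e.g.:

     for (entry = canon_modify_mem_list[bb]; entry; entry = XEXP (entry, 1))
       {
         dest = XEXP (entry, 0);
         entry = XEXP (entry, 1);
         dest_addr = XEXP (entry, 0);
         ... test canon_true_dependence (dest, ..., dest_addr, ...) ...
       }

   compute_transp below walks the list exactly this way. */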
2415 /* Record memory modification information for INSN. We do not actually care
2416 about the memory location(s) that are set, or even how they are set (consider
2417 a CALL_INSN). We merely need to record which insns modify memory. */
2420 record_last_mem_set_info (insn)
2423 /* load_killed_in_block_p will handle the case of calls clobbering
2424 everything. */
2425 modify_mem_list[BLOCK_NUM (insn)] =
2426 alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
2427 bitmap_set_bit (modify_mem_list_set, BLOCK_NUM (insn));
2429 if (GET_CODE (insn) == CALL_INSN)
2431 /* Note that traversals of this loop (other than for free-ing)
2432 will break after encountering a CALL_INSN. So, there's no
2433 need to insert a pair of items, as canon_list_insert does. */
2434 canon_modify_mem_list[BLOCK_NUM (insn)] =
2435 alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
2436 bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
2439 note_stores (PATTERN (insn), canon_list_insert, (void *) insn);
2442 /* Called from compute_hash_table via note_stores to handle one
2443 SET or CLOBBER in an insn. DATA is really the instruction in which
2444 the SET is taking place. */
2447 record_last_set_info (dest, setter, data)
2448 rtx dest, setter ATTRIBUTE_UNUSED;
2451 rtx last_set_insn = (rtx) data;
2453 if (GET_CODE (dest) == SUBREG)
2454 dest = SUBREG_REG (dest);
2456 if (GET_CODE (dest) == REG)
2457 record_last_reg_set_info (last_set_insn, REGNO (dest));
2458 else if (GET_CODE (dest) == MEM
2459 /* Ignore pushes, they clobber nothing. */
2460 && ! push_operand (dest, GET_MODE (dest)))
2461 record_last_mem_set_info (last_set_insn);
2464 /* Top level function to create an expression or assignment hash table.
2466 Expression entries are placed in the hash table if
2467 - they are of the form (set (pseudo-reg) src),
2468 - src is something we want to perform GCSE on,
2469 - none of the operands are subsequently modified in the block
2471 Assignment entries are placed in the hash table if
2472 - they are of the form (set (pseudo-reg) src),
2473 - src is something we want to perform const/copy propagation on,
2474 - none of the operands or target are subsequently modified in the block
2476 Currently src must be a pseudo-reg or a const_int.
2478 F is the first insn.
2479 SET_P is non-zero for computing the assignment hash table. */
2482 compute_hash_table (set_p)
2487 /* While we compute the hash table we also compute a bit array of which
2488 registers are set in which blocks.
2489 ??? This isn't needed during const/copy propagation, but it's cheap to
2490 compute. Later. */
2491 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2493 /* Re-cache any INSN_LIST nodes we have allocated. */
2494 clear_modify_mem_tables ();
2495 /* Some working arrays used to track first and last set in each block. */
2496 reg_avail_info = (struct reg_avail_info*)
2497 gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2499 for (i = 0; i < max_gcse_regno; ++i)
2500 reg_avail_info[i].last_bb = NEVER_SET;
2502 for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
2506 int in_libcall_block;
2508 /* First pass over the instructions records information used to
2509 determine when registers and memory are first and last set.
2510 ??? hard-reg reg_set_in_block computation
2511 could be moved to compute_sets since they currently don't change. */
2513 for (insn = BLOCK_HEAD (current_bb);
2514 insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2515 insn = NEXT_INSN (insn))
2517 if (! INSN_P (insn))
2520 if (GET_CODE (insn) == CALL_INSN)
2522 bool clobbers_all = false;
2523 #ifdef NON_SAVING_SETJMP
2524 if (NON_SAVING_SETJMP
2525 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2526 clobbers_all = true;
2529 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2531 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2532 record_last_reg_set_info (insn, regno);
2537 note_stores (PATTERN (insn), record_last_set_info, insn);
2540 /* The next pass builds the hash table. */
2542 for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
2543 insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2544 insn = NEXT_INSN (insn))
2547 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2548 in_libcall_block = 1;
2549 else if (set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2550 in_libcall_block = 0;
2551 hash_scan_insn (insn, set_p, in_libcall_block);
2552 if (!set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2553 in_libcall_block = 0;
2557 free (reg_avail_info);
2558 reg_avail_info = NULL;
2561 /* Allocate space for the set hash table.
2562 N_INSNS is the number of instructions in the function.
2563 It is used to determine the number of buckets to use. */
2566 alloc_set_hash_table (n_insns)
2571 set_hash_table_size = n_insns / 4;
2572 if (set_hash_table_size < 11)
2573 set_hash_table_size = 11;
2575 /* Attempt to maintain efficient use of hash table.
2576 Making it an odd number is simplest for now.
2577 ??? Later take some measurements. */
2578 set_hash_table_size |= 1;
2579 n = set_hash_table_size * sizeof (struct expr *);
2580 set_hash_table = (struct expr **) gmalloc (n);
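/* Worked example (editor's annotation, not original GCC code): for a
   3000-insn function the division above gives 750 buckets and the
   `|= 1' makes it 751; a 30-insn function hits the floor of 11, which
   is already odd. */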
2583 /* Free things allocated by alloc_set_hash_table. */
2586 free_set_hash_table ()
2588 free (set_hash_table);
2591 /* Compute the hash table for doing copy/const propagation. */
2594 compute_set_hash_table ()
2596 /* Initialize count of number of entries in hash table. */
2598 memset ((char *) set_hash_table, 0,
2599 set_hash_table_size * sizeof (struct expr *));
2601 compute_hash_table (1);
2604 /* Allocate space for the expression hash table.
2605 N_INSNS is the number of instructions in the function.
2606 It is used to determine the number of buckets to use. */
2609 alloc_expr_hash_table (n_insns)
2610 unsigned int n_insns;
2614 expr_hash_table_size = n_insns / 2;
2615 /* Make sure the amount is usable. */
2616 if (expr_hash_table_size < 11)
2617 expr_hash_table_size = 11;
2619 /* Attempt to maintain efficient use of hash table.
2620 Making it an odd number is simplest for now.
2621 ??? Later take some measurements. */
2622 expr_hash_table_size |= 1;
2623 n = expr_hash_table_size * sizeof (struct expr *);
2624 expr_hash_table = (struct expr **) gmalloc (n);
2627 /* Free things allocated by alloc_expr_hash_table. */
2630 free_expr_hash_table ()
2632 free (expr_hash_table);
2635 /* Compute the hash table for doing GCSE. */
2638 compute_expr_hash_table ()
2640 /* Initialize count of number of entries in hash table. */
2642 memset ((char *) expr_hash_table, 0,
2643 expr_hash_table_size * sizeof (struct expr *));
2645 compute_hash_table (0);
2648 /* Expression tracking support. */
2650 /* Lookup pattern PAT in the expression table.
2651 The result is a pointer to the table entry, or NULL if not found. */
2653 static struct expr *
2657 int do_not_record_p;
2658 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2659 expr_hash_table_size);
2662 if (do_not_record_p)
2665 expr = expr_hash_table[hash];
2667 while (expr && ! expr_equiv_p (expr->expr, pat))
2668 expr = expr->next_same_hash;
2673 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2674 matches it, otherwise return the first entry for REGNO. The result is a
2675 pointer to the table entry, or NULL if not found. */
2677 static struct expr *
2678 lookup_set (regno, pat)
2682 unsigned int hash = hash_set (regno, set_hash_table_size);
2685 expr = set_hash_table[hash];
2689 while (expr && ! expr_equiv_p (expr->expr, pat))
2690 expr = expr->next_same_hash;
2694 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2695 expr = expr->next_same_hash;
2701 /* Return the next entry for REGNO in list EXPR. */
2703 static struct expr *
2704 next_set (regno, expr)
2709 expr = expr->next_same_hash;
2710 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2715 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2717 clear_modify_mem_tables ()
2721 EXECUTE_IF_SET_IN_BITMAP
2722 (modify_mem_list_set, 0, i,
2723 free_INSN_LIST_list (modify_mem_list + i));
2724 bitmap_clear (modify_mem_list_set);
2726 EXECUTE_IF_SET_IN_BITMAP
2727 (canon_modify_mem_list_set, 0, i,
2728 free_INSN_LIST_list (canon_modify_mem_list + i));
2729 bitmap_clear (canon_modify_mem_list_set);
2732 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2735 free_modify_mem_tables ()
2737 clear_modify_mem_tables ();
2738 free (modify_mem_list);
2739 free (canon_modify_mem_list);
2740 modify_mem_list = 0;
2741 canon_modify_mem_list = 0;
2744 /* Reset tables used to keep track of what's still available [since the
2745 start of the block]. */
2748 reset_opr_set_tables ()
2750 /* Maintain a bitmap of which regs have been set since beginning of
2752 CLEAR_REG_SET (reg_set_bitmap);
2754 /* Also keep a record of the last instruction to modify memory.
2755 For now this is very trivial, we only record whether any memory
2756 location has been modified. */
2757 clear_modify_mem_tables ();
2760 /* Return non-zero if the operands of X are not set before INSN in
2761 INSN's basic block. */
2764 oprs_not_set_p (x, insn)
2774 code = GET_CODE (x);
2790 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2791 INSN_CUID (insn), x, 0))
2794 return oprs_not_set_p (XEXP (x, 0), insn);
2797 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2803 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2807 /* If we are about to do the last recursive call
2808 needed at this level, change it into iteration.
2809 This function is called enough to be worth it. */
2811 return oprs_not_set_p (XEXP (x, i), insn);
2813 if (! oprs_not_set_p (XEXP (x, i), insn))
2816 else if (fmt[i] == 'E')
2817 for (j = 0; j < XVECLEN (x, i); j++)
2818 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2825 /* Mark things set by a CALL. */
2831 if (! CONST_OR_PURE_CALL_P (insn))
2832 record_last_mem_set_info (insn);
2835 /* Mark things set by a SET. */
2838 mark_set (pat, insn)
2841 rtx dest = SET_DEST (pat);
2843 while (GET_CODE (dest) == SUBREG
2844 || GET_CODE (dest) == ZERO_EXTRACT
2845 || GET_CODE (dest) == SIGN_EXTRACT
2846 || GET_CODE (dest) == STRICT_LOW_PART)
2847 dest = XEXP (dest, 0);
2849 if (GET_CODE (dest) == REG)
2850 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2851 else if (GET_CODE (dest) == MEM)
2852 record_last_mem_set_info (insn);
2854 if (GET_CODE (SET_SRC (pat)) == CALL)
2858 /* Record things set by a CLOBBER. */
2861 mark_clobber (pat, insn)
2864 rtx clob = XEXP (pat, 0);
2866 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2867 clob = XEXP (clob, 0);
2869 if (GET_CODE (clob) == REG)
2870 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2872 record_last_mem_set_info (insn);
2875 /* Record things set by INSN.
2876 This data is used by oprs_not_set_p. */
2879 mark_oprs_set (insn)
2882 rtx pat = PATTERN (insn);
2885 if (GET_CODE (pat) == SET)
2886 mark_set (pat, insn);
2887 else if (GET_CODE (pat) == PARALLEL)
2888 for (i = 0; i < XVECLEN (pat, 0); i++)
2890 rtx x = XVECEXP (pat, 0, i);
2892 if (GET_CODE (x) == SET)
2894 else if (GET_CODE (x) == CLOBBER)
2895 mark_clobber (x, insn);
2896 else if (GET_CODE (x) == CALL)
2900 else if (GET_CODE (pat) == CLOBBER)
2901 mark_clobber (pat, insn);
2902 else if (GET_CODE (pat) == CALL)
2907 /* Classic GCSE reaching definition support. */
2909 /* Allocate reaching def variables. */
2912 alloc_rd_mem (n_blocks, n_insns)
2913 int n_blocks, n_insns;
2915 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2916 sbitmap_vector_zero (rd_kill, n_basic_blocks);
2918 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2919 sbitmap_vector_zero (rd_gen, n_basic_blocks);
2921 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2922 sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2924 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2925 sbitmap_vector_zero (rd_out, n_basic_blocks);
2928 /* Free reaching def variables. */
2933 sbitmap_vector_free (rd_kill);
2934 sbitmap_vector_free (rd_gen);
2935 sbitmap_vector_free (reaching_defs);
2936 sbitmap_vector_free (rd_out);
2939 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2942 handle_rd_kill_set (insn, regno, bb)
2947 struct reg_set *this_reg;
2949 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2950 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2951 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2954 /* Compute the set of kills for reaching definitions. */
2963 /* For each block
2964 For each set bit in `gen' of the block (i.e. each insn which
2965 generates a definition in the block)
2966 Call the reg set by the insn corresponding to that bit regx
2967 Look at the linked list starting at reg_set_table[regx]
2968 For each setting of regx in the linked list, which is not in
2969 this block
2970 Set the bit in `kill' corresponding to that insn. */
2971 for (bb = 0; bb < n_basic_blocks; bb++)
2972 for (cuid = 0; cuid < max_cuid; cuid++)
2973 if (TEST_BIT (rd_gen[bb], cuid))
2975 rtx insn = CUID_INSN (cuid);
2976 rtx pat = PATTERN (insn);
2978 if (GET_CODE (insn) == CALL_INSN)
2980 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2981 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2982 handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
2985 if (GET_CODE (pat) == PARALLEL)
2987 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2989 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2991 if ((code == SET || code == CLOBBER)
2992 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2993 handle_rd_kill_set (insn,
2994 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2998 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2999 /* Each setting of this register outside of this block
3000 must be marked in the set of kills in this block. */
3001 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
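/* Illustrative example (editor's annotation, not original GCC code;
   insns and registers are hypothetical): if reg 5 is set by insn I2 in
   block 2 and by insn I7 in block 7, then rd_gen[2] holds I2's CUID and
   the calls above add I7's CUID to rd_kill[2] (and, symmetrically, I2's
   CUID to rd_kill[7]): every definition of the register made outside a
   block is killed by that block. */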
3005 /* Compute the reaching definitions as in
3006 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
3007 Chapter 10. It is the same algorithm as used for computing available
3008 expressions but applied to the gens and kills of reaching definitions. */
3013 int bb, changed, passes;
3015 for (bb = 0; bb < n_basic_blocks; bb++)
3016 sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
3023 for (bb = 0; bb < n_basic_blocks; bb++)
3025 sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
3026 changed |= sbitmap_union_of_diff_cg (rd_out[bb], rd_gen[bb],
3027 reaching_defs[bb], rd_kill[bb]);
3033 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
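/* Editor's annotation (not original GCC code): the loop above iterates
   the standard forward dataflow equations to a fixed point:

     reaching_defs[bb] = UNION over preds p of rd_out[p]
     rd_out[bb]        = rd_gen[bb] | (reaching_defs[bb] & ~rd_kill[bb])

   sbitmap_union_of_diff_cg computes the second line and reports whether
   rd_out[bb] changed, which drives the `changed' flag. */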
3036 /* Classic GCSE available expression support. */
3038 /* Allocate memory for available expression computation. */
3041 alloc_avail_expr_mem (n_blocks, n_exprs)
3042 int n_blocks, n_exprs;
3044 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3045 sbitmap_vector_zero (ae_kill, n_basic_blocks);
3047 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3048 sbitmap_vector_zero (ae_gen, n_basic_blocks);
3050 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3051 sbitmap_vector_zero (ae_in, n_basic_blocks);
3053 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3054 sbitmap_vector_zero (ae_out, n_basic_blocks);
3058 free_avail_expr_mem ()
3060 sbitmap_vector_free (ae_kill);
3061 sbitmap_vector_free (ae_gen);
3062 sbitmap_vector_free (ae_in);
3063 sbitmap_vector_free (ae_out);
3066 /* Compute the set of available expressions generated in each basic block. */
3075 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3076 This is all we have to do because an expression is not recorded if it
3077 is not available, and the only expressions we want to work with are the
3078 ones that are recorded. */
3079 for (i = 0; i < expr_hash_table_size; i++)
3080 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3081 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3082 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3085 /* Return non-zero if expression X is killed in BB. */
3088 expr_killed_p (x, bb)
3099 code = GET_CODE (x);
3103 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3106 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3109 return expr_killed_p (XEXP (x, 0), bb);
3127 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3131 /* If we are about to do the last recursive call
3132 needed at this level, change it into iteration.
3133 This function is called enough to be worth it. */
3135 return expr_killed_p (XEXP (x, i), bb);
3136 else if (expr_killed_p (XEXP (x, i), bb))
3139 else if (fmt[i] == 'E')
3140 for (j = 0; j < XVECLEN (x, i); j++)
3141 if (expr_killed_p (XVECEXP (x, i, j), bb))
3148 /* Compute the set of available expressions killed in each basic block. */
3151 compute_ae_kill (ae_gen, ae_kill)
3152 sbitmap *ae_gen, *ae_kill;
3158 for (bb = 0; bb < n_basic_blocks; bb++)
3159 for (i = 0; i < expr_hash_table_size; i++)
3160 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3162 /* Skip EXPR if generated in this block. */
3163 if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
3166 if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
3167 SET_BIT (ae_kill[bb], expr->bitmap_index);
3171 /* Actually perform the Classic GCSE optimizations. */
3173 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3175 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3176 as a positive reach. We want to do this when there are two computations
3177 of the expression in the block.
3179 VISITED is a pointer to a working buffer for tracking which BB's have
3180 been visited. It is NULL for the top-level call.
3182 We treat reaching expressions that go through blocks containing the same
3183 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3184 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3185 2 as not reaching. The intent is to improve the probability of finding
3186 only one reaching expression and to reduce register lifetimes by picking
3187 the closest such expression. */
3190 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3194 int check_self_loop;
3199 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3201 basic_block pred_bb = pred->src;
3203 if (visited[pred_bb->index])
3204 /* This predecessor has already been visited. Nothing to do. */
3206 else if (pred_bb == bb)
3208 /* BB loops on itself. */
3210 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3211 && BLOCK_NUM (occr->insn) == pred_bb->index)
3214 visited[pred_bb->index] = 1;
3217 /* Ignore this predecessor if it kills the expression. */
3218 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3219 visited[pred_bb->index] = 1;
3221 /* Does this predecessor generate this expression? */
3222 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3224 /* Is this the occurrence we're looking for?
3225 Note that there's only one generating occurrence per block
3226 so we just need to check the block number. */
3227 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3230 visited[pred_bb->index] = 1;
3233 /* Neither gen nor kill. */
3236 visited[pred_bb->index] = 1;
3237 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3244 /* All paths have been checked. */
3248 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3249 memory allocated for that function is returned. */
3252 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3256 int check_self_loop;
3259 char *visited = (char *) xcalloc (n_basic_blocks, 1);
3261 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3267 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3268 If there is more than one such instruction, return NULL.
3270 Called only by handle_avail_expr. */
3273 computing_insn (expr, insn)
3277 basic_block bb = BLOCK_FOR_INSN (insn);
3279 if (expr->avail_occr->next == NULL)
3281 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3282 /* The available expression is actually itself
3283 (i.e. a loop in the flow graph) so do nothing. */
3286 /* (FIXME) Case that we found a pattern that was created by
3287 a substitution that took place. */
3288 return expr->avail_occr->insn;
3292 /* Pattern is computed more than once.
3293 Search backwards from this insn to see how many of these
3294 computations actually reach this insn. */
3296 rtx insn_computes_expr = NULL;
3299 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3301 if (BLOCK_FOR_INSN (occr->insn) == bb)
3303 /* The expression is generated in this block.
3304 The only time we care about this is when the expression
3305 is generated later in the block [and thus there's a loop].
3306 We let the normal cse pass handle the other cases. */
3307 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3308 && expr_reaches_here_p (occr, expr, bb, 1))
3314 insn_computes_expr = occr->insn;
3317 else if (expr_reaches_here_p (occr, expr, bb, 0))
3323 insn_computes_expr = occr->insn;
3327 if (insn_computes_expr == NULL)
3330 return insn_computes_expr;
3334 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3335 Only called by can_disregard_other_sets. */
3338 def_reaches_here_p (insn, def_insn)
3343 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3346 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3348 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3350 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3352 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3353 reg = XEXP (PATTERN (def_insn), 0);
3354 else if (GET_CODE (PATTERN (def_insn)) == SET)
3355 reg = SET_DEST (PATTERN (def_insn));
3359 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3368 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
3369 value returned is the number of definitions that reach INSN. Returning a
3370 value of zero means that [maybe] more than one definition reaches INSN and
3371 the caller can't perform whatever optimization it is trying. i.e. it is
3372 always safe to return zero. */
3375 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3376 struct reg_set **addr_this_reg;
3380 int number_of_reaching_defs = 0;
3381 struct reg_set *this_reg;
3383 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3384 if (def_reaches_here_p (insn, this_reg->insn))
3386 number_of_reaching_defs++;
3387 /* Ignore parallels for now. */
3388 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3392 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3393 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3394 SET_SRC (PATTERN (insn)))))
3395 /* A setting of the reg to a different value reaches INSN. */
3398 if (number_of_reaching_defs > 1)
3400 /* If in this setting the value the register is being set to is
3401 equal to the previous value the register was set to, and this
3402 setting reaches the insn we are trying to do the substitution
3403 on, then we are ok. */
3404 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3406 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3407 SET_SRC (PATTERN (insn))))
3411 *addr_this_reg = this_reg;
3414 return number_of_reaching_defs;
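/* Illustrative example (editor's annotation, not original GCC code):
   if INSN is reached by (set (reg 5) (const_int 1)) from block 2 and by
   an identical (set (reg 5) (const_int 1)) from block 3, the loop above
   returns 2: several reaching definitions are fine as long as every one
   is a SET with an rtx_equal_p source.  A reaching CLOBBER, a PARALLEL,
   or a different value makes the function return 0. */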
3417 /* Expression computed by insn is available and the substitution is legal,
3418 so try to perform the substitution.
3420 The result is non-zero if any changes were made. */
3423 handle_avail_expr (insn, expr)
3427 rtx pat, insn_computes_expr, expr_set;
3429 struct reg_set *this_reg;
3430 int found_setting, use_src;
3433 /* We only handle the case where one computation of the expression
3434 reaches this instruction. */
3435 insn_computes_expr = computing_insn (expr, insn);
3436 if (insn_computes_expr == NULL)
3438 expr_set = single_set (insn_computes_expr);
3445 /* At this point we know only one computation of EXPR outside of this
3446 block reaches this insn. Now try to find a register that the
3447 expression is computed into. */
3448 if (GET_CODE (SET_SRC (expr_set)) == REG)
3450 /* This is the case when the available expression that reaches
3451 here has already been handled as an available expression. */
3452 unsigned int regnum_for_replacing
3453 = REGNO (SET_SRC (expr_set));
3455 /* If the register was created by GCSE we can't use `reg_set_table',
3456 however we know it's set only once. */
3457 if (regnum_for_replacing >= max_gcse_regno
3458 /* If the register the expression is computed into is set only once,
3459 or only one set reaches this insn, we can use it. */
3460 || (((this_reg = reg_set_table[regnum_for_replacing]),
3461 this_reg->next == NULL)
3462 || can_disregard_other_sets (&this_reg, insn, 0)))
3471 unsigned int regnum_for_replacing
3472 = REGNO (SET_DEST (expr_set));
3474 /* This shouldn't happen. */
3475 if (regnum_for_replacing >= max_gcse_regno)
3478 this_reg = reg_set_table[regnum_for_replacing];
3480 /* If the register the expression is computed into is set only once,
3481 or only one set reaches this insn, use it. */
3482 if (this_reg->next == NULL
3483 || can_disregard_other_sets (&this_reg, insn, 0))
3489 pat = PATTERN (insn);
3491 to = SET_SRC (expr_set);
3493 to = SET_DEST (expr_set);
3494 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3496 /* We should be able to ignore the return code from validate_change but
3497 to play it safe we check. */
3501 if (gcse_file != NULL)
3503 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3505 fprintf (gcse_file, " reg %d %s insn %d\n",
3506 REGNO (to), use_src ? "from" : "set in",
3507 INSN_UID (insn_computes_expr));
3512 /* The register that the expr is computed into is set more than once. */
3513 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3515 /* Insert an insn after insnx that copies the reg set in insnx
3516 into a new pseudo register call this new register REGN.
3517 From insnb until end of basic block or until REGB is set
3518 replace all uses of REGB with REGN. */
3521 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3523 /* Generate the new insn. */
3524 /* ??? If the change fails, we return 0, even though we created
3525 an insn. I think this is ok. */
3527 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3528 SET_DEST (expr_set)),
3529 insn_computes_expr);
3531 /* Keep register set table up to date. */
3532 record_one_set (REGNO (to), new_insn);
3534 gcse_create_count++;
3535 if (gcse_file != NULL)
3537 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3538 INSN_UID (NEXT_INSN (insn_computes_expr)),
3539 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3540 fprintf (gcse_file, ", computed in insn %d,\n",
3541 INSN_UID (insn_computes_expr));
3542 fprintf (gcse_file, " into newly allocated reg %d\n",
3546 pat = PATTERN (insn);
3548 /* Do register replacement for INSN. */
3549 changed = validate_change (insn, &SET_SRC (pat),
3551 (NEXT_INSN (insn_computes_expr))),
3554 /* We should be able to ignore the return code from validate_change but
3555 to play it safe we check. */
3559 if (gcse_file != NULL)
3562 "GCSE: Replacing the source in insn %d with reg %d ",
3564 REGNO (SET_DEST (PATTERN (NEXT_INSN
3565 (insn_computes_expr)))));
3566 fprintf (gcse_file, "set in insn %d\n",
3567 INSN_UID (insn_computes_expr));
3575 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3576 the dataflow analysis has been done.
3578 The result is non-zero if a change was made. */
3586 /* Note we start at block 1. */
3589 for (bb = 1; bb < n_basic_blocks; bb++)
3591 /* Reset tables used to keep track of what's still valid [since the
3592 start of the block]. */
3593 reset_opr_set_tables ();
3595 for (insn = BLOCK_HEAD (bb);
3596 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3597 insn = NEXT_INSN (insn))
3599 /* Is insn of form (set (pseudo-reg) ...)? */
3600 if (GET_CODE (insn) == INSN
3601 && GET_CODE (PATTERN (insn)) == SET
3602 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3603 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3605 rtx pat = PATTERN (insn);
3606 rtx src = SET_SRC (pat);
3609 if (want_to_gcse_p (src)
3610 /* Is the expression recorded? */
3611 && ((expr = lookup_expr (src)) != NULL)
3612 /* Is the expression available [at the start of the
3614 && TEST_BIT (ae_in[bb], expr->bitmap_index)
3615 /* Are the operands unchanged since the start of the
3617 && oprs_not_set_p (src, insn))
3618 changed |= handle_avail_expr (insn, expr);
3621 /* Keep track of everything modified by this insn. */
3622 /* ??? Need to be careful w.r.t. mods done to INSN. */
3624 mark_oprs_set (insn);
3631 /* Top level routine to perform one classic GCSE pass.
3633 Return non-zero if a change was made. */
3636 one_classic_gcse_pass (pass)
3641 gcse_subst_count = 0;
3642 gcse_create_count = 0;
3644 alloc_expr_hash_table (max_cuid);
3645 alloc_rd_mem (n_basic_blocks, max_cuid);
3646 compute_expr_hash_table ();
3648 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3649 expr_hash_table_size, n_exprs);
3655 alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3657 compute_ae_kill (ae_gen, ae_kill);
3658 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3659 changed = classic_gcse ();
3660 free_avail_expr_mem ();
3664 free_expr_hash_table ();
3668 fprintf (gcse_file, "\n");
3669 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3670 current_function_name, pass, bytes_used, gcse_subst_count);
3671 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3677 /* Compute copy/constant propagation working variables. */
3679 /* Local properties of assignments. */
3680 static sbitmap *cprop_pavloc;
3681 static sbitmap *cprop_absaltered;
3683 /* Global properties of assignments (computed from the local properties). */
3684 static sbitmap *cprop_avin;
3685 static sbitmap *cprop_avout;
3687 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3688 basic blocks. N_SETS is the number of sets. */
3691 alloc_cprop_mem (n_blocks, n_sets)
3692 int n_blocks, n_sets;
3694 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3695 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3697 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3698 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3701 /* Free vars used by copy/const propagation. */
3706 sbitmap_vector_free (cprop_pavloc);
3707 sbitmap_vector_free (cprop_absaltered);
3708 sbitmap_vector_free (cprop_avin);
3709 sbitmap_vector_free (cprop_avout);
3712 /* For each block, compute whether X is transparent. X is either an
3713 expression or an assignment [though we don't care which, for this context
3714 an assignment is treated as an expression]. For each block where an
3715 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3716 bit in BMAP. */
3719 compute_transp (x, indx, bmap, set_p)
3730 /* repeat is used to turn tail-recursion into iteration since GCC
3731 can't do it when there's no return value. */
3737 code = GET_CODE (x);
3743 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3745 for (bb = 0; bb < n_basic_blocks; bb++)
3746 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3747 SET_BIT (bmap[bb], indx);
3751 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3752 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3757 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3759 for (bb = 0; bb < n_basic_blocks; bb++)
3760 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3761 RESET_BIT (bmap[bb], indx);
3765 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3766 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3773 for (bb = 0; bb < n_basic_blocks; bb++)
3775 rtx list_entry = canon_modify_mem_list[bb];
3779 rtx dest, dest_addr;
3781 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3784 SET_BIT (bmap[bb], indx);
3786 RESET_BIT (bmap[bb], indx);
3789 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3790 Examine each hunk of memory that is modified. */
3792 dest = XEXP (list_entry, 0);
3793 list_entry = XEXP (list_entry, 1);
3794 dest_addr = XEXP (list_entry, 0);
3796 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3797 x, rtx_addr_varies_p))
3800 SET_BIT (bmap[bb], indx);
3802 RESET_BIT (bmap[bb], indx);
3805 list_entry = XEXP (list_entry, 1);
3828 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3832 /* If we are about to do the last recursive call
3833 needed at this level, change it into iteration.
3834 This function is called enough to be worth it. */
3841 compute_transp (XEXP (x, i), indx, bmap, set_p);
3843 else if (fmt[i] == 'E')
3844 for (j = 0; j < XVECLEN (x, i); j++)
3845 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
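/* Illustrative example (editor's annotation, not original GCC code):
   for X = (plus (reg 100) (reg 101)) the REG cases above mark every
   block containing a set of reg 100 or of reg 101; for a MEM the MEM
   case marks only blocks whose recorded stores (or calls) actually
   conflict according to canon_true_dependence.  With SET_P == 1 the
   marking sets bits (cprop's `absaltered' sets); with SET_P == 0 it
   clears bits in a transparency map that starts out all ones. */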
3849 /* Top level routine to do the dataflow analysis needed by copy/const
3853 compute_cprop_data ()
3855 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3856 compute_available (cprop_pavloc, cprop_absaltered,
3857 cprop_avout, cprop_avin);
3860 /* Copy/constant propagation. */
3862 /* Maximum number of register uses in an insn that we handle. */
3865 /* Table of uses found in an insn.
3866 Allocated statically to avoid alloc/free complexity and overhead. */
3867 static struct reg_use reg_use_table[MAX_USES];
3869 /* Index into `reg_use_table' while building it. */
3870 static int reg_use_count;
3872 /* Set up a list of register numbers used in INSN. The found uses are stored
3873 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3874 and contains the number of uses in the table upon exit.
3876 ??? If a register appears multiple times we will record it multiple times.
3877 This doesn't hurt anything but it will slow things down. */
3880 find_used_regs (xptr, data)
3882 void *data ATTRIBUTE_UNUSED;
3889 /* repeat is used to turn tail-recursion into iteration since GCC
3890 can't do it when there's no return value. */
3895 code = GET_CODE (x);
3898 if (reg_use_count == MAX_USES)
3901 reg_use_table[reg_use_count].reg_rtx = x;
3905 /* Recursively scan the operands of this expression. */
3907 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3911 /* If we are about to do the last recursive call
3912 needed at this level, change it into iteration.
3913 This function is called enough to be worth it. */
3920 find_used_regs (&XEXP (x, i), data);
3922 else if (fmt[i] == 'E')
3923 for (j = 0; j < XVECLEN (x, i); j++)
3924 find_used_regs (&XVECEXP (x, i, j), data);
3928 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3929 Returns non-zero if successful. */
3932 try_replace_reg (from, to, insn)
3935 rtx note = find_reg_equal_equiv_note (insn);
3938 rtx set = single_set (insn);
3940 success = validate_replace_src (from, to, insn);
3942 /* If the above failed and this is a single set, try to simplify the source of
3943 the set given our substitution. We could perhaps try this for multiple
3944 SETs, but it probably won't buy us anything. */
3945 if (!success && set != 0)
3947 src = simplify_replace_rtx (SET_SRC (set), from, to);
3949 if (!rtx_equal_p (src, SET_SRC (set))
3950 && validate_change (insn, &SET_SRC (set), src, 0))
3954 /* If we've failed to do replacement, have a single SET, and don't already
3955 have a note, add a REG_EQUAL note to not lose information. */
3956 if (!success && note == 0 && set != 0)
3957 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3959 /* If there is already a NOTE, update the expression in it with our
3962 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3964 /* REG_EQUAL may get simplified into a register.
3965 We don't allow that. Remove that note. This code ought
3966 not to happen, because previous code ought to synthesize a
3967 reg-reg move, but be on the safe side. */
3968 if (note && REG_P (XEXP (note, 0)))
3969 remove_note (insn, note);
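/* Illustrative sketch (editor's annotation, not original GCC code) of
   the fallback path above: replacing reg 101 by (const_int 4) in

     (set (reg 100) (plus (reg 101) (const_int 4)))

   may fail validate_replace_src, since (plus (const_int 4)
   (const_int 4)) is unlikely to match any insn; simplify_replace_rtx
   folds it to (const_int 8), which validate_change can then install as
   an ordinary constant move. */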
3974 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
3975 NULL if no such set is found. */
3977 static struct expr *
3978 find_avail_set (regno, insn)
3982 /* SET1 contains the last set found that can be returned to the caller for
3983 use in a substitution. */
3984 struct expr *set1 = 0;
3986 /* Loops are not possible here. To get a loop we would need two sets
3987 available at the start of the block containing INSN, i.e. we would
3988 need two sets like this available at the start of the block:
3990 (set (reg X) (reg Y))
3991 (set (reg Y) (reg X))
3993 This can not happen since the set of (reg Y) would have killed the
3994 set of (reg X) making it unavailable at the start of this block. */
3998 struct expr *set = lookup_set (regno, NULL_RTX);
4000 /* Find a set that is available at the start of the block
4001 which contains INSN. */
4004 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
4006 set = next_set (regno, set);
4009 /* If no available set was found we've reached the end of the
4010 (possibly empty) copy chain. */
4014 if (GET_CODE (set->expr) != SET)
4017 src = SET_SRC (set->expr);
4019 /* We know the set is available.
4020 Now check that SRC is ANTLOC (i.e. none of the source operands
4021 have changed since the start of the block).
4023 If the source operand changed, we may still use it for the next
4024 iteration of this loop, but we may not use it for substitutions. */
4026 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4029 /* If the source of the set is anything except a register, then
4030 we have reached the end of the copy chain. */
4031 if (GET_CODE (src) != REG)
4034 /* Follow the copy chain, ie start another iteration of the loop
4035 and see if we have an available copy into SRC. */
4036 regno = REGNO (src);
4039 /* SET1 holds the last set that was available and anticipatable at
4040 INSN. */
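/* Illustrative example (editor's annotation, not original GCC code) of
   the copy-chain walk above, assuming both sets are available and
   anticipatable at INSN's block:

     (set (reg 2) (reg 1))
     (set (reg 3) (reg 2))

   A query for reg 3 first records the reg 3 set, follows SRC to reg 2,
   records the reg 2 set, fails to find a set of reg 1 and stops; SET1
   is then (set (reg 2) (reg 1)), so uses of reg 3 are rewritten in
   terms of reg 1, the oldest value in the chain. */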
4044 /* Subroutine of cprop_insn that tries to propagate constants into
4045 JUMP_INSNS. INSN must be a conditional jump. FROM is what we will try to
4046 replace, SRC is the constant we will try to substitute for it. Returns
4047 nonzero if a change was made. We know INSN has just a SET. */
4050 cprop_jump (bb, insn, from, src)
4056 rtx set = PATTERN (insn);
4057 rtx new = simplify_replace_rtx (SET_SRC (set), from, src);
4059 /* If no simplification can be made, then try the next
4060 register. */
4061 if (rtx_equal_p (new, SET_SRC (set)))
4064 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
4069 if (! validate_change (insn, &SET_SRC (set), new, 0))
4072 /* If this has turned into an unconditional jump,
4073 then put a barrier after it so that the unreachable
4074 code will be deleted. */
4075 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4076 emit_barrier_after (insn);
4079 run_jump_opt_after_gcse = 1;
4082 if (gcse_file != NULL)
4085 "CONST-PROP: Replacing reg %d in insn %d with constant ",
4086 REGNO (from), INSN_UID (insn));
4087 print_rtl (gcse_file, src);
4088 fprintf (gcse_file, "\n");
4090 purge_dead_edges (bb);
4097 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
4098 for machines that have CC0. INSN is a single set that stores into CC0;
4099 the insn following it is a conditional jump. REG_USED is the use we will
4100 try to replace, SRC is the constant we will try to substitute for it.
4101 Returns nonzero if a change was made. */
4104 cprop_cc0_jump (bb, insn, reg_used, src)
4107 struct reg_use *reg_used;
4110 /* First substitute in the SET_SRC of INSN, then substitute that for
4111 CC0 in JUMP. */
4112 rtx jump = NEXT_INSN (insn);
4113 rtx new_src = simplify_replace_rtx (SET_SRC (PATTERN (insn)),
4114 reg_used->reg_rtx, src);
4116 if (! cprop_jump (bb, jump, cc0_rtx, new_src))
4119 /* If we succeeded, delete the cc0 setter. */
4126 /* Perform constant and copy propagation on INSN.
4127 The result is non-zero if a change was made. */
4130 cprop_insn (bb, insn, alter_jumps)
4135 struct reg_use *reg_used;
4143 note_uses (&PATTERN (insn), find_used_regs, NULL);
4145 note = find_reg_equal_equiv_note (insn);
4147 /* We may win even when propagating constants into notes. */
4149 find_used_regs (&XEXP (note, 0), NULL);
4151 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4152 reg_used++, reg_use_count--)
4154 unsigned int regno = REGNO (reg_used->reg_rtx);
4158 /* Ignore registers created by GCSE.
4159 We do this because ... */
4160 if (regno >= max_gcse_regno)
4163 /* If the register has already been set in this block, there's
4164 nothing we can do. */
4165 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4168 /* Find an assignment that sets reg_used and is available
4169 at the start of the block. */
4170 set = find_avail_set (regno, insn);
4175 /* ??? We might be able to handle PARALLELs. Later. */
4176 if (GET_CODE (pat) != SET)
4179 src = SET_SRC (pat);
4181 /* Constant propagation. */
4182 if (CONSTANT_P (src))
4184 /* Handle normal insns first. */
4185 if (GET_CODE (insn) == INSN
4186 && try_replace_reg (reg_used->reg_rtx, src, insn))
4190 if (gcse_file != NULL)
4192 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4194 fprintf (gcse_file, "insn %d with constant ",
4196 print_rtl (gcse_file, src);
4197 fprintf (gcse_file, "\n");
4200 /* The original insn setting reg_used may or may not now be
4201 deletable. We leave the deletion to flow. */
4204 /* Try to propagate a CONST_INT into a conditional jump.
4205 We're pretty specific about what we will handle in this
4206 code; we can extend it as necessary over time.
4208 Right now the insn in question must look like
4209 (set (pc) (if_then_else ...)) */
4210 else if (alter_jumps
4211 && GET_CODE (insn) == JUMP_INSN
4212 && condjump_p (insn)
4213 && ! simplejump_p (insn))
4214 changed |= cprop_jump (bb, insn, reg_used->reg_rtx, src);
4217 /* Similar code for machines that use a pair of CC0 setter and
4218 conditional jump insn. */
4219 else if (alter_jumps
4220 && GET_CODE (PATTERN (insn)) == SET
4221 && SET_DEST (PATTERN (insn)) == cc0_rtx
4222 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4223 && condjump_p (NEXT_INSN (insn))
4224 && ! simplejump_p (NEXT_INSN (insn))
4225 && cprop_cc0_jump (bb, insn, reg_used, src))
4232 else if (GET_CODE (src) == REG
4233 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4234 && REGNO (src) != regno)
4236 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4240 if (gcse_file != NULL)
4242 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4243 regno, INSN_UID (insn));
4244 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4247 /* The original insn setting reg_used may or may not now be
4248 deletable. We leave the deletion to flow. */
4249 /* FIXME: If it turns out that the insn isn't deletable,
4250 then we may have unnecessarily extended register lifetimes
4251 and made things worse. */
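/* Illustrative example (editor's annotation, not original GCC code) of
   the conditional-jump case above: with reg 100 known to equal
   (const_int 0), the insn

     (set (pc) (if_then_else (eq (reg 100) (const_int 0))
                             (label_ref 23) (pc)))

   simplifies to (set (pc) (label_ref 23)), an unconditional jump;
   cprop_jump then emits a barrier after it so the now-dead fall-through
   code can be deleted. */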
4259 /* Forward propagate copies. This includes copies and constants. Return
4260 non-zero if a change was made. */
4269 /* Note we start at block 1. */
4272 for (bb = 1; bb < n_basic_blocks; bb++)
4274 /* Reset tables used to keep track of what's still valid [since the
4275 start of the block]. */
4276 reset_opr_set_tables ();
4278 for (insn = BLOCK_HEAD (bb);
4279 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
4280 insn = NEXT_INSN (insn))
4283 changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
4285 /* Keep track of everything modified by this insn. */
4286 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4287 call mark_oprs_set if we turned the insn into a NOTE. */
4288 if (GET_CODE (insn) != NOTE)
4289 mark_oprs_set (insn);
4293 if (gcse_file != NULL)
4294 fprintf (gcse_file, "\n");
4299 /* Perform one copy/constant propagation pass.
4300 F is the first insn in the function.
4301 PASS is the pass count. */
4304 one_cprop_pass (pass, alter_jumps)
4310 const_prop_count = 0;
4311 copy_prop_count = 0;
4313 alloc_set_hash_table (max_cuid);
4314 compute_set_hash_table ();
4316 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4320 alloc_cprop_mem (n_basic_blocks, n_sets);
4321 compute_cprop_data ();
4322 changed = cprop (alter_jumps);
4326 free_set_hash_table ();
4330 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4331 current_function_name, pass, bytes_used);
4332 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4333 const_prop_count, copy_prop_count);
4339 /* Compute PRE+LCM working variables. */
4341 /* Local properties of expressions. */
4342 /* Nonzero for expressions that are transparent in the block. */
4343 static sbitmap *transp;
4345 /* Nonzero for expressions that are transparent at the end of the block.
4346 This is only zero for expressions killed by an abnormal critical
4347 edge created by a call. */
4348 static sbitmap *transpout;
4350 /* Nonzero for expressions that are computed (available) in the block. */
4351 static sbitmap *comp;
4353 /* Nonzero for expressions that are locally anticipatable in the block. */
4354 static sbitmap *antloc;
4356 /* Nonzero for expressions where this block is an optimal computation
4357 point. */
4358 static sbitmap *pre_optimal;
4360 /* Nonzero for expressions which are redundant in a particular block. */
4361 static sbitmap *pre_redundant;
4363 /* Nonzero for expressions which should be inserted on a specific edge. */
4364 static sbitmap *pre_insert_map;
4366 /* Nonzero for expressions which should be deleted in a specific block. */
4367 static sbitmap *pre_delete_map;
4369 /* Contains the edge_list returned by pre_edge_lcm. */
4370 static struct edge_list *edge_list;
4372 /* Redundant insns. */
4373 static sbitmap pre_redundant_insns;
4375 /* Allocate vars used for PRE analysis. */
4378 alloc_pre_mem (n_blocks, n_exprs)
4379 int n_blocks, n_exprs;
4381 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4382 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4383 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4386 pre_redundant = NULL;
4387 pre_insert_map = NULL;
4388 pre_delete_map = NULL;
4391 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4393 /* pre_insert and pre_delete are allocated later. */
4396 /* Free vars used for PRE analysis. */
4401 sbitmap_vector_free (transp);
4402 sbitmap_vector_free (comp);
4404 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4407 sbitmap_vector_free (pre_optimal);
4409 sbitmap_vector_free (pre_redundant);
4411 sbitmap_vector_free (pre_insert_map);
4413 sbitmap_vector_free (pre_delete_map);
4415 sbitmap_vector_free (ae_in);
4417 sbitmap_vector_free (ae_out);
4419 transp = comp = NULL;
4420 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4421 ae_in = ae_out = NULL;
4424 /* Top level routine to do the dataflow analysis needed by PRE. */
4429 sbitmap trapping_expr;
4433 compute_local_properties (transp, comp, antloc, 0);
4434 sbitmap_vector_zero (ae_kill, n_basic_blocks);
4436 /* Collect expressions which might trap. */
4437 trapping_expr = sbitmap_alloc (n_exprs);
4438 sbitmap_zero (trapping_expr);
4439 for (ui = 0; ui < expr_hash_table_size; ui++)
4442 for (e = expr_hash_table[ui]; e != NULL; e = e->next_same_hash)
4443 if (may_trap_p (e->expr))
4444 SET_BIT (trapping_expr, e->bitmap_index);
/* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)

   This is significantly faster than compute_ae_kill. */
4453 for (i = 0; i < n_basic_blocks; i++)
4457 /* If the current block is the destination of an abnormal edge, we
4458 kill all trapping expressions because we won't be able to properly
4459 place the instruction on the edge. So make them neither
4460 anticipatable nor transparent. This is fairly conservative. */
4461 for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
4462 if (e->flags & EDGE_ABNORMAL)
4464 sbitmap_difference (antloc[i], antloc[i], trapping_expr);
4465 sbitmap_difference (transp[i], transp[i], trapping_expr);
4469 sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
4470 sbitmap_not (ae_kill[i], ae_kill[i]);
4473 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4474 ae_kill, &pre_insert_map, &pre_delete_map);
4475 sbitmap_vector_free (antloc);
4477 sbitmap_vector_free (ae_kill);
4479 sbitmap_free (trapping_expr);
/* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
   block BB.
4487 VISITED is a pointer to a working buffer for tracking which BB's have
4488 been visited. It is NULL for the top-level call.
4490 We treat reaching expressions that go through blocks containing the same
4491 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4492 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4493 2 as not reaching. The intent is to improve the probability of finding
4494 only one reaching expression and to reduce register lifetimes by picking
4495 the closest such expression. */
4498 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4499 basic_block occr_bb;
4506 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
4508 basic_block pred_bb = pred->src;
4510 if (pred->src == ENTRY_BLOCK_PTR
/* Has the predecessor already been visited? */
4512 || visited[pred_bb->index])
4513 ;/* Nothing to do. */
4515 /* Does this predecessor generate this expression? */
4516 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
4518 /* Is this the occurrence we're looking for?
4519 Note that there's only one generating occurrence per block
4520 so we just need to check the block number. */
4521 if (occr_bb == pred_bb)
4524 visited[pred_bb->index] = 1;
4526 /* Ignore this predecessor if it kills the expression. */
4527 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
4528 visited[pred_bb->index] = 1;
4530 /* Neither gen nor kill. */
4533 visited[pred_bb->index] = 1;
4534 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4539 /* All paths have been checked. */
/* The wrapper for pre_expr_reaches_here_p_work that ensures that any
   memory allocated for that function is returned. */
4547 pre_expr_reaches_here_p (occr_bb, expr, bb)
4548 basic_block occr_bb;
4553 char *visited = (char *) xcalloc (n_basic_blocks, 1);
4555 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
/* Given an expr, generate RTL which we can insert at the end of a BB,
   or on an edge.  Set the block number of any insns generated to
   the value of BB.
4567 process_insert_insn (expr)
4570 rtx reg = expr->reaching_reg;
4571 rtx exp = copy_rtx (expr->expr);
4576 /* If the expression is something that's an operand, like a constant,
4577 just copy it to a register. */
4578 if (general_operand (exp, GET_MODE (reg)))
4579 emit_move_insn (reg, exp);
4581 /* Otherwise, make a new insn to compute this expression and make sure the
4582 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4583 expression to make sure we don't have any sharing issues. */
4584 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
4587 pat = gen_sequence ();
4593 /* Add EXPR to the end of basic block BB.
4595 This is used by both the PRE and code hoisting.
4597 For PRE, we want to verify that the expr is either transparent
4598 or locally anticipatable in the target block. This check makes
4599 no sense for code hoisting. */
4602 insert_insn_end_bb (expr, bb, pre)
4609 rtx reg = expr->reaching_reg;
4610 int regno = REGNO (reg);
4614 pat = process_insert_insn (expr);
/* If the last insn is a jump, insert EXPR in front [taking care to
   handle cc0, etc. properly].  Similarly we need to take care of
   trapping instructions in the presence of non-call exceptions. */
4620 if (GET_CODE (insn) == JUMP_INSN
4621 || (GET_CODE (insn) == INSN
4622 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
/* It should always be the case that we can put these instructions
   anywhere in the basic block when performing PRE optimizations.
4630 if (GET_CODE (insn) == INSN && pre
4631 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4632 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4635 /* If this is a jump table, then we can't insert stuff here. Since
4636 we know the previous real insn must be the tablejump, we insert
4637 the new instruction just before the tablejump. */
4638 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4639 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4640 insn = prev_real_insn (insn);
4643 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4644 if cc0 isn't set. */
4645 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4647 insn = XEXP (note, 0);
4650 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4651 if (maybe_cc0_setter
4652 && INSN_P (maybe_cc0_setter)
4653 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4654 insn = maybe_cc0_setter;
4657 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4658 new_insn = emit_insn_before (pat, insn);
4661 /* Likewise if the last insn is a call, as will happen in the presence
4662 of exception handling. */
4663 else if (GET_CODE (insn) == CALL_INSN
4664 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
/* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
   we search backward and place the instructions before the first
   parameter is loaded.  Do this for everyone for consistency and a
   presumption that we'll get better code elsewhere as well.

   It should always be the case that we can put these instructions
   anywhere in the basic block when performing PRE optimizations.
4676 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4677 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4680 /* Since different machines initialize their parameter registers
4681 in different orders, assume nothing. Collect the set of all
4682 parameter registers. */
4683 insn = find_first_parameter_load (insn, bb->head);
4685 /* If we found all the parameter loads, then we want to insert
4686 before the first parameter load.
4688 If we did not find all the parameter loads, then we might have
4689 stopped on the head of the block, which could be a CODE_LABEL.
4690 If we inserted before the CODE_LABEL, then we would be putting
4691 the insn in the wrong basic block. In that case, put the insn
4692 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4693 while (GET_CODE (insn) == CODE_LABEL
4694 || NOTE_INSN_BASIC_BLOCK_P (insn))
4695 insn = NEXT_INSN (insn);
4697 new_insn = emit_insn_before (pat, insn);
4700 new_insn = emit_insn_after (pat, insn);
4702 /* Keep block number table up to date.
4703 Note, PAT could be a multiple insn sequence, we have to make
4704 sure that each insn in the sequence is handled. */
4705 if (GET_CODE (pat) == SEQUENCE)
4707 for (i = 0; i < XVECLEN (pat, 0); i++)
4709 rtx insn = XVECEXP (pat, 0, i);
4711 add_label_notes (PATTERN (insn), new_insn);
4713 note_stores (PATTERN (insn), record_set_info, insn);
4718 add_label_notes (pat, new_insn);
4720 /* Keep register set table up to date. */
4721 record_one_set (regno, new_insn);
4724 gcse_create_count++;
4728 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4729 bb->index, INSN_UID (new_insn));
4730 fprintf (gcse_file, "copying expression %d to reg %d\n",
4731 expr->bitmap_index, regno);
4735 /* Insert partially redundant expressions on edges in the CFG to make
4736 the expressions fully redundant. */
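/* E.g. (an illustrative CFG): if the expression is computed on only one
   of two paths that join before a later evaluation of it, the LCM insert
   map directs us to insert a computation on the edge entering the join
   from the other path.  The original evaluation is then fully redundant,
   and has already been replaced by a copy from the reaching register.  */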
4739 pre_edge_insert (edge_list, index_map)
4740 struct edge_list *edge_list;
4741 struct expr **index_map;
4743 int e, i, j, num_edges, set_size, did_insert = 0;
4746 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4747 if it reaches any of the deleted expressions. */
4749 set_size = pre_insert_map[0]->size;
4750 num_edges = NUM_EDGES (edge_list);
4751 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4752 sbitmap_vector_zero (inserted, num_edges);
4754 for (e = 0; e < num_edges; e++)
4757 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4759 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4761 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4763 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4764 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4766 struct expr *expr = index_map[j];
4769 /* Now look at each deleted occurrence of this expression. */
4770 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4772 if (! occr->deleted_p)
/* Insert this expression on this edge if it would
   reach the deleted occurrence in BB. */
4777 if (!TEST_BIT (inserted[e], j))
4780 edge eg = INDEX_EDGE (edge_list, e);
4782 /* We can't insert anything on an abnormal and
4783 critical edge, so we insert the insn at the end of
4784 the previous block. There are several alternatives
detailed in Morgan's book, p. 277 (sec. 10.5), for
   handling this situation.  This one is easiest for now. */
4789 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4790 insert_insn_end_bb (index_map[j], bb, 0);
4793 insn = process_insert_insn (index_map[j]);
4794 insert_insn_on_edge (insn, eg);
4799 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4801 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4802 fprintf (gcse_file, "copy expression %d\n",
4803 expr->bitmap_index);
4806 update_ld_motion_stores (expr);
4807 SET_BIT (inserted[e], j);
4809 gcse_create_count++;
4816 sbitmap_vector_free (inserted);
4820 /* Copy the result of INSN to REG. INDX is the expression number. */
4823 pre_insert_copy_insn (expr, insn)
4827 rtx reg = expr->reaching_reg;
4828 int regno = REGNO (reg);
4829 int indx = expr->bitmap_index;
4830 rtx set = single_set (insn);
4836 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
4838 /* Keep register set table up to date. */
4839 record_one_set (regno, new_insn);
4841 gcse_create_count++;
4845 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4846 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4847 INSN_UID (insn), regno);
4848 update_ld_motion_stores (expr);
4851 /* Copy available expressions that reach the redundant expression
4852 to `reaching_reg'. */
4855 pre_insert_copies ()
4862 /* For each available expression in the table, copy the result to
4863 `reaching_reg' if the expression reaches a deleted one.
4865 ??? The current algorithm is rather brute force.
4866 Need to do some profiling. */
4868 for (i = 0; i < expr_hash_table_size; i++)
4869 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4871 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4872 we don't want to insert a copy here because the expression may not
4873 really be redundant. So only insert an insn if the expression was
4874 deleted. This test also avoids further processing if the
4875 expression wasn't deleted anywhere. */
4876 if (expr->reaching_reg == NULL)
4879 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4881 if (! occr->deleted_p)
4884 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4886 rtx insn = avail->insn;
4888 /* No need to handle this one if handled already. */
4889 if (avail->copied_p)
4892 /* Don't handle this one if it's a redundant one. */
4893 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4896 /* Or if the expression doesn't reach the deleted one. */
4897 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4899 BLOCK_FOR_INSN (occr->insn)))
4902 /* Copy the result of avail to reaching_reg. */
4903 pre_insert_copy_insn (expr, insn);
4904 avail->copied_p = 1;
4910 /* Delete redundant computations.
4911 Deletion is done by changing the insn to copy the `reaching_reg' of
4912 the expression into the result of the SET. It is left to later passes
4913 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4915 Returns non-zero if a change is made. */
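/* For example (illustrative RTL): if

	(set (reg 105) (plus:SI (reg 100) (reg 101)))

   is marked for deletion and the expression's reaching register is
   (reg 200), the insn is rewritten as

	(set (reg 105) (reg 200))

   and the copy is left for the later passes to clean up.  */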
4926 for (i = 0; i < expr_hash_table_size; i++)
4927 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4929 int indx = expr->bitmap_index;
/* We only need to search antic_occr since we require
   ANTLOC != 0. */
4934 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4936 rtx insn = occr->insn;
4938 basic_block bb = BLOCK_FOR_INSN (insn);
4940 if (TEST_BIT (pre_delete_map[bb->index], indx))
4942 set = single_set (insn);
4946 /* Create a pseudo-reg to store the result of reaching
4947 expressions into. Get the mode for the new pseudo from
4948 the mode of the original destination pseudo. */
4949 if (expr->reaching_reg == NULL)
4951 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
/* In theory this should never fail since we're creating
   a reg->reg copy.
4956 However, on the x86 some of the movXX patterns actually
4957 contain clobbers of scratch regs. This may cause the
4958 insn created by validate_change to not match any pattern
4959 and thus cause validate_change to fail. */
4960 if (validate_change (insn, &SET_SRC (set),
4961 expr->reaching_reg, 0))
4963 occr->deleted_p = 1;
4964 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4972 "PRE: redundant insn %d (expression %d) in ",
4973 INSN_UID (insn), indx);
4974 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4975 bb->index, REGNO (expr->reaching_reg));
4984 /* Perform GCSE optimizations using PRE.
4985 This is called by one_pre_gcse_pass after all the dataflow analysis
This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4989 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4990 Compiler Design and Implementation.
4992 ??? A new pseudo reg is created to hold the reaching expression. The nice
4993 thing about the classical approach is that it would try to use an existing
4994 reg. If the register can't be adequately optimized [i.e. we introduce
reload problems], one could add a pass here to propagate the new register
   through the block.
4998 ??? We don't handle single sets in PARALLELs because we're [currently] not
4999 able to copy the rest of the parallel when we insert copies to create full
5000 redundancies from partial redundancies. However, there's no reason why we
can't handle PARALLELs in the cases where there are no partial
   redundancies. */
5008 int did_insert, changed;
5009 struct expr **index_map;
5012 /* Compute a mapping from expression number (`bitmap_index') to
5013 hash table entry. */
5015 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5016 for (i = 0; i < expr_hash_table_size; i++)
5017 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5018 index_map[expr->bitmap_index] = expr;
5020 /* Reset bitmap used to track which insns are redundant. */
5021 pre_redundant_insns = sbitmap_alloc (max_cuid);
5022 sbitmap_zero (pre_redundant_insns);
5024 /* Delete the redundant insns first so that
5025 - we know what register to use for the new insns and for the other
5026 ones with reaching expressions
5027 - we know which insns are redundant when we go to create copies */
5029 changed = pre_delete ();
5031 did_insert = pre_edge_insert (edge_list, index_map);
5033 /* In other places with reaching expressions, copy the expression to the
5034 specially allocated pseudo-reg that reaches the redundant expr. */
5035 pre_insert_copies ();
5038 commit_edge_insertions ();
5043 sbitmap_free (pre_redundant_insns);
5047 /* Top level routine to perform one PRE GCSE pass.
5049 Return non-zero if a change was made. */
5052 one_pre_gcse_pass (pass)
5057 gcse_subst_count = 0;
5058 gcse_create_count = 0;
5060 alloc_expr_hash_table (max_cuid);
5061 add_noreturn_fake_exit_edges ();
5063 compute_ld_motion_mems ();
5065 compute_expr_hash_table ();
5066 trim_ld_motion_mems ();
5068 dump_hash_table (gcse_file, "Expression", expr_hash_table,
5069 expr_hash_table_size, n_exprs);
5073 alloc_pre_mem (n_basic_blocks, n_exprs);
5074 compute_pre_data ();
5075 changed |= pre_gcse ();
5076 free_edge_list (edge_list);
5081 remove_fake_edges ();
5082 free_expr_hash_table ();
5086 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5087 current_function_name, pass, bytes_used);
5088 fprintf (gcse_file, "%d substs, %d insns created\n",
5089 gcse_subst_count, gcse_create_count);
5095 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5096 If notes are added to an insn which references a CODE_LABEL, the
5097 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5098 because the following loop optimization pass requires them. */
5100 /* ??? This is very similar to the loop.c add_label_notes function. We
5101 could probably share code here. */
5103 /* ??? If there was a jump optimization pass after gcse and before loop,
5104 then we would not need to do this here, because jump would add the
5105 necessary REG_LABEL notes. */
5108 add_label_notes (x, insn)
5112 enum rtx_code code = GET_CODE (x);
5116 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5118 /* This code used to ignore labels that referred to dispatch tables to
avoid flow generating (slightly) worse code.
5121 We no longer ignore such label references (see LABEL_REF handling in
5122 mark_jump_label for additional information). */
5124 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5126 if (LABEL_P (XEXP (x, 0)))
5127 LABEL_NUSES (XEXP (x, 0))++;
5131 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5134 add_label_notes (XEXP (x, i), insn);
5135 else if (fmt[i] == 'E')
5136 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5137 add_label_notes (XVECEXP (x, i, j), insn);
5141 /* Compute transparent outgoing information for each block.
5143 An expression is transparent to an edge unless it is killed by
5144 the edge itself. This can only happen with abnormal control flow,
5145 when the edge is traversed through a call. This happens with
5146 non-local labels and exceptions.
5148 This would not be necessary if we split the edge. While this is
5149 normally impossible for abnormal critical edges, with some effort
5150 it should be possible with exception handling, since we still have
5151 control over which handler should be invoked. But due to increased
5152 EH table sizes, this may not be worthwhile. */
5155 compute_transpout ()
5161 sbitmap_vector_ones (transpout, n_basic_blocks);
5163 for (bb = 0; bb < n_basic_blocks; ++bb)
/* Note that flow inserted a nop at the end of basic blocks that
   end in call instructions for reasons other than abnormal
   control flow. */
5168 if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
5171 for (i = 0; i < expr_hash_table_size; i++)
5172 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
5173 if (GET_CODE (expr->expr) == MEM)
5175 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5176 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5179 /* ??? Optimally, we would use interprocedural alias
analysis to determine if this mem is actually killed
   by this call. */
5182 RESET_BIT (transpout[bb], expr->bitmap_index);
5187 /* Removal of useless null pointer checks */
5189 /* Called via note_stores. X is set by SETTER. If X is a register we must
5190 invalidate nonnull_local and set nonnull_killed. DATA is really a
5191 `null_pointer_info *'.
5193 We ignore hard registers. */
5196 invalidate_nonnull_info (x, setter, data)
5198 rtx setter ATTRIBUTE_UNUSED;
5202 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5204 while (GET_CODE (x) == SUBREG)
5207 /* Ignore anything that is not a register or is a hard register. */
5208 if (GET_CODE (x) != REG
5209 || REGNO (x) < npi->min_reg
5210 || REGNO (x) >= npi->max_reg)
5213 regno = REGNO (x) - npi->min_reg;
5215 RESET_BIT (npi->nonnull_local[npi->current_block], regno);
5216 SET_BIT (npi->nonnull_killed[npi->current_block], regno);
5219 /* Do null-pointer check elimination for the registers indicated in
5220 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5221 they are not our responsibility to free. */
5224 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5226 unsigned int *block_reg;
5227 sbitmap *nonnull_avin;
5228 sbitmap *nonnull_avout;
5229 struct null_pointer_info *npi;
5233 sbitmap *nonnull_local = npi->nonnull_local;
5234 sbitmap *nonnull_killed = npi->nonnull_killed;
5236 /* Compute local properties, nonnull and killed. A register will have
5237 the nonnull property if at the end of the current block its value is
5238 known to be nonnull. The killed property indicates that somewhere in
5239 the block any information we had about the register is killed.
5241 Note that a register can have both properties in a single block. That
indicates that it's killed, then later in the block a new value is
   computed. */
5244 sbitmap_vector_zero (nonnull_local, n_basic_blocks);
5245 sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
5247 for (current_block = 0; current_block < n_basic_blocks; current_block++)
5249 rtx insn, stop_insn;
5251 /* Set the current block for invalidate_nonnull_info. */
5252 npi->current_block = current_block;
/* Scan each insn in the basic block looking for memory references and
   null pointer checks. */
5256 stop_insn = NEXT_INSN (BLOCK_END (current_block));
5257 for (insn = BLOCK_HEAD (current_block);
5259 insn = NEXT_INSN (insn))
5264 /* Ignore anything that is not a normal insn. */
5265 if (! INSN_P (insn))
5268 /* Basically ignore anything that is not a simple SET. We do have
5269 to make sure to invalidate nonnull_local and set nonnull_killed
5270 for such insns though. */
5271 set = single_set (insn);
5274 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5278 /* See if we've got a usable memory load. We handle it first
5279 in case it uses its address register as a dest (which kills
5280 the nonnull property). */
5281 if (GET_CODE (SET_SRC (set)) == MEM
5282 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5283 && REGNO (reg) >= npi->min_reg
5284 && REGNO (reg) < npi->max_reg)
5285 SET_BIT (nonnull_local[current_block],
5286 REGNO (reg) - npi->min_reg);
5288 /* Now invalidate stuff clobbered by this insn. */
5289 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5291 /* And handle stores, we do these last since any sets in INSN can
5292 not kill the nonnull property if it is derived from a MEM
5293 appearing in a SET_DEST. */
5294 if (GET_CODE (SET_DEST (set)) == MEM
5295 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5296 && REGNO (reg) >= npi->min_reg
5297 && REGNO (reg) < npi->max_reg)
5298 SET_BIT (nonnull_local[current_block],
5299 REGNO (reg) - npi->min_reg);
5303 /* Now compute global properties based on the local properties. This
is a classic global availability algorithm. */
5305 compute_available (nonnull_local, nonnull_killed,
5306 nonnull_avout, nonnull_avin);
/* Now look at each bb and see if it ends with a compare of a value
   against zero. */
5310 for (bb = 0; bb < n_basic_blocks; bb++)
5312 rtx last_insn = BLOCK_END (bb);
5313 rtx condition, earliest;
5314 int compare_and_branch;
5316 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5317 since BLOCK_REG[BB] is zero if this block did not end with a
5318 comparison against zero, this condition works. */
5319 if (block_reg[bb] < npi->min_reg
5320 || block_reg[bb] >= npi->max_reg)
5323 /* LAST_INSN is a conditional jump. Get its condition. */
5324 condition = get_condition (last_insn, &earliest);
5326 /* If we can't determine the condition then skip. */
5330 /* Is the register known to have a nonzero value? */
5331 if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
5334 /* Try to compute whether the compare/branch at the loop end is one or
5335 two instructions. */
5336 if (earliest == last_insn)
5337 compare_and_branch = 1;
5338 else if (earliest == prev_nonnote_insn (last_insn))
5339 compare_and_branch = 2;
5343 /* We know the register in this comparison is nonnull at exit from
5344 this block. We can optimize this comparison. */
5345 if (GET_CODE (condition) == NE)
5349 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
5351 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5352 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5353 emit_barrier_after (new_jump);
5356 delete_insn (last_insn);
5357 if (compare_and_branch == 2)
5358 delete_insn (earliest);
5359 purge_dead_edges (BASIC_BLOCK (bb));
5361 /* Don't check this block again. (Note that BLOCK_END is
invalid here; we deleted the last instruction in the
   block.) */
/* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
   at compile time.
5371 This is conceptually similar to global constant/copy propagation and
5372 classic global CSE (it even uses the same dataflow equations as cprop).
If a register is used as a memory address with the form (mem (reg)), then we
5375 know that REG can not be zero at that point in the program. Any instruction
5376 which sets REG "kills" this property.
5378 So, if every path leading to a conditional branch has an available memory
5379 reference of that form, then we know the register can not have the value
5380 zero at the conditional branch.
So we merely need to compute the local properties and propagate that data
5383 around the cfg, then optimize where possible.
5385 We run this pass two times. Once before CSE, then again after CSE. This
5386 has proven to be the most profitable approach. It is rare for new
optimization opportunities of this nature to appear after the first CSE
   pass.
5390 This could probably be integrated with global cprop with a little work. */
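/* A source-level sketch of what gets removed (assuming nothing between
   the dereference and the test changes P):

	x = *p;		P is dereferenced, so it is nonnull here
	if (p == 0)	hence this test can never be true
	  abort ();	and the compare/branch is deleted

   At the RTL level we look for an available (mem (reg)) reaching an
   EQ/NE comparison of that register against zero.  */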
5393 delete_null_pointer_checks (f)
5394 rtx f ATTRIBUTE_UNUSED;
5396 sbitmap *nonnull_avin, *nonnull_avout;
5397 unsigned int *block_reg;
5402 struct null_pointer_info npi;
5404 /* If we have only a single block, then there's nothing to do. */
5405 if (n_basic_blocks <= 1)
5408 /* Trying to perform global optimizations on flow graphs which have
5409 a high connectivity will take a long time and is unlikely to be
5410 particularly useful.
5412 In normal circumstances a cfg should have about twice as many edges
5413 as blocks. But we do not want to punish small functions which have
5414 a couple switch statements. So we require a relatively large number
5415 of basic blocks and the ratio of edges to blocks to be high. */
5416 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
/* We need four bitmaps, each with a bit for each register in each
   basic block. */
5421 max_reg = max_reg_num ();
5422 regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
5424 /* Allocate bitmaps to hold local and global properties. */
5425 npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5426 npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5427 nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5428 nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5430 /* Go through the basic blocks, seeing whether or not each block
5431 ends with a conditional branch whose condition is a comparison
5432 against zero. Record the register compared in BLOCK_REG. */
5433 block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
5434 for (bb = 0; bb < n_basic_blocks; bb++)
5436 rtx last_insn = BLOCK_END (bb);
5437 rtx condition, earliest, reg;
5439 /* We only want conditional branches. */
5440 if (GET_CODE (last_insn) != JUMP_INSN
5441 || !any_condjump_p (last_insn)
5442 || !onlyjump_p (last_insn))
5445 /* LAST_INSN is a conditional jump. Get its condition. */
5446 condition = get_condition (last_insn, &earliest);
5448 /* If we were unable to get the condition, or it is not an equality
5449 comparison against zero then there's nothing we can do. */
5451 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5452 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5453 || (XEXP (condition, 1)
5454 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5457 /* We must be checking a register against zero. */
5458 reg = XEXP (condition, 0);
5459 if (GET_CODE (reg) != REG)
5462 block_reg[bb] = REGNO (reg);
5465 /* Go through the algorithm for each block of registers. */
5466 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5469 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5470 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5471 nonnull_avout, &npi);
5474 /* Free the table of registers compared at the end of every block. */
5478 sbitmap_vector_free (npi.nonnull_local);
5479 sbitmap_vector_free (npi.nonnull_killed);
5480 sbitmap_vector_free (nonnull_avin);
5481 sbitmap_vector_free (nonnull_avout);
5484 /* Code Hoisting variables and subroutines. */
5486 /* Very busy expressions. */
5487 static sbitmap *hoist_vbein;
5488 static sbitmap *hoist_vbeout;
5490 /* Hoistable expressions. */
5491 static sbitmap *hoist_exprs;
5493 /* Dominator bitmaps. */
5494 static sbitmap *dominators;
5496 /* ??? We could compute post dominators and run this algorithm in
reverse to perform tail merging; doing so would probably be
   more effective than the tail merging code in jump.c.
5500 It's unclear if tail merging could be run in parallel with
5501 code hoisting. It would be nice. */
5503 /* Allocate vars used for code hoisting analysis. */
5506 alloc_code_hoist_mem (n_blocks, n_exprs)
5507 int n_blocks, n_exprs;
5509 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5510 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5511 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5513 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5514 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5515 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5516 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5518 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5521 /* Free vars used for code hoisting analysis. */
5524 free_code_hoist_mem ()
5526 sbitmap_vector_free (antloc);
5527 sbitmap_vector_free (transp);
5528 sbitmap_vector_free (comp);
5530 sbitmap_vector_free (hoist_vbein);
5531 sbitmap_vector_free (hoist_vbeout);
5532 sbitmap_vector_free (hoist_exprs);
5533 sbitmap_vector_free (transpout);
5535 sbitmap_vector_free (dominators);
5538 /* Compute the very busy expressions at entry/exit from each block.
5540 An expression is very busy if all paths from a given point
5541 compute the expression. */
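/* In dataflow terms this is the standard backward "very busy expressions"
   system, solved iteratively below:

	VBEOUT(b) = intersection of VBEIN(s) over all successors s of b
	VBEIN(b)  = ANTLOC(b) | (VBEOUT(b) & TRANSP(b))  */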
5544 compute_code_hoist_vbeinout ()
5546 int bb, changed, passes;
5548 sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
5549 sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
/* We scan the blocks in the reverse order to speed up
   the convergence. */
5560 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5562 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb], antloc[bb],
5563 hoist_vbeout[bb], transp[bb]);
5564 if (bb != n_basic_blocks - 1)
5565 sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
5572 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5575 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5578 compute_code_hoist_data ()
5580 compute_local_properties (transp, comp, antloc, 0);
5581 compute_transpout ();
5582 compute_code_hoist_vbeinout ();
5583 calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
5585 fprintf (gcse_file, "\n");
5588 /* Determine if the expression identified by EXPR_INDEX would
reach BB unimpaired if it was placed at the end of EXPR_BB.

   It's unclear exactly what Muchnick meant by "unimpaired".  It seems
5592 to me that the expression must either be computed or transparent in
5593 *every* block in the path(s) from EXPR_BB to BB. Any other definition
5594 would allow the expression to be hoisted out of loops, even if
5595 the expression wasn't a loop invariant.
5597 Contrast this to reachability for PRE where an expression is
considered reachable if *any* path reaches instead of *all*
   paths. */
5602 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5603 basic_block expr_bb;
5609 int visited_allocated_locally = 0;
5612 if (visited == NULL)
5614 visited_allocated_locally = 1;
5615 visited = xcalloc (n_basic_blocks, 1);
5618 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5620 basic_block pred_bb = pred->src;
5622 if (pred->src == ENTRY_BLOCK_PTR)
5624 else if (visited[pred_bb->index])
5627 /* Does this predecessor generate this expression? */
5628 else if (TEST_BIT (comp[pred_bb->index], expr_index))
5630 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
5636 visited[pred_bb->index] = 1;
5637 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5642 if (visited_allocated_locally)
5645 return (pred == NULL);
5648 /* Actually perform code hoisting. */
5655 struct expr **index_map;
5658 sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
5660 /* Compute a mapping from expression number (`bitmap_index') to
5661 hash table entry. */
5663 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5664 for (i = 0; i < expr_hash_table_size; i++)
5665 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5666 index_map[expr->bitmap_index] = expr;
5668 /* Walk over each basic block looking for potentially hoistable
expressions; nothing gets hoisted from the entry block.
5670 for (bb = 0; bb < n_basic_blocks; bb++)
5673 int insn_inserted_p;
5675 /* Examine each expression that is very busy at the exit of this
5676 block. These are the potentially hoistable expressions. */
5677 for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
5681 if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
5683 /* We've found a potentially hoistable expression, now
5684 we look at every block BB dominates to see if it
5685 computes the expression. */
5686 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5688 /* Ignore self dominance. */
5690 || ! TEST_BIT (dominators[dominated], bb))
5693 /* We've found a dominated block, now see if it computes
5694 the busy expression and whether or not moving that
5695 expression to the "beginning" of that block is safe. */
5696 if (!TEST_BIT (antloc[dominated], i))
5699 /* Note if the expression would reach the dominated block
unimpaired if it was placed at the end of BB.
5702 Keep track of how many times this expression is hoistable
5703 from a dominated block into BB. */
5704 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5705 BASIC_BLOCK (dominated), NULL))
5709 /* If we found more than one hoistable occurrence of this
5710 expression, then note it in the bitmap of expressions to
5711 hoist. It makes no sense to hoist things which are computed
5712 in only one BB, and doing so tends to pessimize register
5713 allocation. One could increase this value to try harder
5714 to avoid any possible code expansion due to register
5715 allocation issues; however experiments have shown that
5716 the vast majority of hoistable expressions are only movable
from two successors, so raising this threshold is likely
5718 to nullify any benefit we get from code hoisting. */
5721 SET_BIT (hoist_exprs[bb], i);
5727 /* If we found nothing to hoist, then quit now. */
5731 /* Loop over all the hoistable expressions. */
5732 for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
5734 /* We want to insert the expression into BB only once, so
5735 note when we've inserted it. */
5736 insn_inserted_p = 0;
5738 /* These tests should be the same as the tests above. */
5739 if (TEST_BIT (hoist_vbeout[bb], i))
5741 /* We've found a potentially hoistable expression, now
5742 we look at every block BB dominates to see if it
5743 computes the expression. */
5744 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5746 /* Ignore self dominance. */
5748 || ! TEST_BIT (dominators[dominated], bb))
5751 /* We've found a dominated block, now see if it computes
5752 the busy expression and whether or not moving that
5753 expression to the "beginning" of that block is safe. */
5754 if (!TEST_BIT (antloc[dominated], i))
5757 /* The expression is computed in the dominated block and
5758 it would be safe to compute it at the start of the
5759 dominated block. Now we have to determine if the
5760 expression would reach the dominated block if it was
5761 placed at the end of BB. */
5762 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5763 BASIC_BLOCK (dominated), NULL))
5765 struct expr *expr = index_map[i];
5766 struct occr *occr = expr->antic_occr;
5770 /* Find the right occurrence of this expression. */
while (occr && BLOCK_NUM (occr->insn) != dominated)
5774 /* Should never happen. */
5780 set = single_set (insn);
5784 /* Create a pseudo-reg to store the result of reaching
5785 expressions into. Get the mode for the new pseudo
5786 from the mode of the original destination pseudo. */
5787 if (expr->reaching_reg == NULL)
5789 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
/* In theory this should never fail since we're creating
   a reg->reg copy.
5794 However, on the x86 some of the movXX patterns
5795 actually contain clobbers of scratch regs. This may
5796 cause the insn created by validate_change to not
match any pattern and thus cause validate_change to
   fail. */
5799 if (validate_change (insn, &SET_SRC (set),
5800 expr->reaching_reg, 0))
5802 occr->deleted_p = 1;
5803 if (!insn_inserted_p)
5805 insert_insn_end_bb (index_map[i],
5806 BASIC_BLOCK (bb), 0);
5807 insn_inserted_p = 1;
5819 /* Top level routine to perform one code hoisting (aka unification) pass
5821 Return non-zero if a change was made. */
5824 one_code_hoisting_pass ()
5828 alloc_expr_hash_table (max_cuid);
5829 compute_expr_hash_table ();
dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
5832 expr_hash_table_size, n_exprs);
5836 alloc_code_hoist_mem (n_basic_blocks, n_exprs);
5837 compute_code_hoist_data ();
5839 free_code_hoist_mem ();
5842 free_expr_hash_table ();
5847 /* Here we provide the things required to do store motion towards
5848 the exit. In order for this to be effective, gcse also needed to
be taught how to move a load when it is killed only by a store to itself.
	int i;
	float a[10];

	void foo(float scale)
	{
	  for (i=0; i<10; i++)
	    a[i] *= scale;
	}

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.
5864 The 'Load Motion' referred to and implemented in this file is
5865 an enhancement to gcse which when using edge based lcm, recognizes
5866 this situation and allows gcse to move the load out of the loop.
5868 Once gcse has hoisted the load, store motion can then push this
load towards the exit, and we end up with no loads or stores of 'i'
   in the loop. */
5872 /* This will search the ldst list for a matching expression. If it
5873 doesn't find one, we create one and initialize it. */
5875 static struct ls_expr *
5879 struct ls_expr * ptr;
5881 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5882 if (expr_equiv_p (ptr->pattern, x))
5887 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
5889 ptr->next = pre_ldst_mems;
5892 ptr->loads = NULL_RTX;
5893 ptr->stores = NULL_RTX;
5894 ptr->reaching_reg = NULL_RTX;
5897 ptr->hash_index = 0;
5898 pre_ldst_mems = ptr;
5904 /* Free up an individual ldst entry. */
5907 free_ldst_entry (ptr)
5908 struct ls_expr * ptr;
5910 free_INSN_LIST_list (& ptr->loads);
5911 free_INSN_LIST_list (& ptr->stores);
5916 /* Free up all memory associated with the ldst list. */
5921 while (pre_ldst_mems)
5923 struct ls_expr * tmp = pre_ldst_mems;
5925 pre_ldst_mems = pre_ldst_mems->next;
5927 free_ldst_entry (tmp);
5930 pre_ldst_mems = NULL;
5933 /* Dump debugging info about the ldst list. */
5936 print_ldst_list (file)
5939 struct ls_expr * ptr;
5941 fprintf (file, "LDST list: \n");
5943 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5945 fprintf (file, " Pattern (%3d): ", ptr->index);
5947 print_rtl (file, ptr->pattern);
5949 fprintf (file, "\n Loads : ");
5952 print_rtl (file, ptr->loads);
5954 fprintf (file, "(nil)");
5956 fprintf (file, "\n Stores : ");
5959 print_rtl (file, ptr->stores);
5961 fprintf (file, "(nil)");
5963 fprintf (file, "\n\n");
5966 fprintf (file, "\n");
/* Return the entry in the ldst list that matches X and has not been
   invalidated, or NULL if there is none. */
5971 static struct ls_expr *
5972 find_rtx_in_ldst (x)
5975 struct ls_expr * ptr;
5977 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5978 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
5984 /* Assign each element of the list of mems a monotonically increasing value. */
5989 struct ls_expr * ptr;
5992 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5998 /* Return first item in the list. */
6000 static inline struct ls_expr *
6003 return pre_ldst_mems;
/* Return the next item in the list after the specified one. */
6008 static inline struct ls_expr *
6010 struct ls_expr * ptr;
6015 /* Load Motion for loads which only kill themselves. */
6017 /* Return true if x is a simple MEM operation, with no registers or
6018 side effects. These are the types of loads we consider for the
6019 ld_motion list, otherwise we let the usual aliasing take care of it. */
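/* E.g. (illustrative RTL): (mem:SI (symbol_ref "x")) is simple, while a
   volatile MEM, a BLKmode MEM, or (mem:SI (reg 100)) (a varying address)
   is not.  */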
6025 if (GET_CODE (x) != MEM)
6028 if (MEM_VOLATILE_P (x))
6031 if (GET_MODE (x) == BLKmode)
6034 if (!rtx_varies_p (XEXP (x, 0), 0))
6040 /* Make sure there isn't a buried reference in this pattern anywhere.
6041 If there is, invalidate the entry for it since we're not capable
of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow each entry in the
   ld_motion list to not-alias itself.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up. */
6049 invalidate_any_buried_refs (x)
6054 struct ls_expr * ptr;
6056 /* Invalidate it in the list. */
6057 if (GET_CODE (x) == MEM && simple_mem (x))
6059 ptr = ldst_entry (x);
6063 /* Recursively process the insn. */
6064 fmt = GET_RTX_FORMAT (GET_CODE (x));
6066 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6069 invalidate_any_buried_refs (XEXP (x, i));
6070 else if (fmt[i] == 'E')
6071 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6072 invalidate_any_buried_refs (XVECEXP (x, i, j));
6076 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6077 being defined as MEM loads and stores to symbols, with no
6078 side effects and no registers in the expression. If there are any
uses/defs which don't match these criteria, the entry is invalidated and
6080 trimmed out later. */
6083 compute_ld_motion_mems ()
6085 struct ls_expr * ptr;
6089 pre_ldst_mems = NULL;
6091 for (bb = 0; bb < n_basic_blocks; bb++)
6093 for (insn = BLOCK_HEAD (bb);
6094 insn && insn != NEXT_INSN (BLOCK_END (bb));
6095 insn = NEXT_INSN (insn))
6097 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6099 if (GET_CODE (PATTERN (insn)) == SET)
6101 rtx src = SET_SRC (PATTERN (insn));
6102 rtx dest = SET_DEST (PATTERN (insn));
6104 /* Check for a simple LOAD... */
6105 if (GET_CODE (src) == MEM && simple_mem (src))
6107 ptr = ldst_entry (src);
6108 if (GET_CODE (dest) == REG)
6109 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6115 /* Make sure there isn't a buried load somewhere. */
6116 invalidate_any_buried_refs (src);
6119 /* Check for stores. Don't worry about aliased ones, they
6120 will block any movement we might do later. We only care
about this exact pattern since those are the only
   circumstances in which we will ignore the aliasing info. */
6123 if (GET_CODE (dest) == MEM && simple_mem (dest))
6125 ptr = ldst_entry (dest);
6127 if (GET_CODE (src) != MEM
6128 && GET_CODE (src) != ASM_OPERANDS)
6129 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6135 invalidate_any_buried_refs (PATTERN (insn));
6141 /* Remove any references that have been either invalidated or are not in the
6142 expression list for pre gcse. */
6145 trim_ld_motion_mems ()
6147 struct ls_expr * last = NULL;
6148 struct ls_expr * ptr = first_ls_expr ();
6152 int del = ptr->invalid;
6153 struct expr * expr = NULL;
6155 /* Delete if entry has been made invalid. */
6161 /* Delete if we cannot find this mem in the expression list. */
6162 for (i = 0; i < expr_hash_table_size && del; i++)
6164 for (expr = expr_hash_table[i];
6166 expr = expr->next_same_hash)
6167 if (expr_equiv_p (expr->expr, ptr->pattern))
6179 last->next = ptr->next;
6180 free_ldst_entry (ptr);
6185 pre_ldst_mems = pre_ldst_mems->next;
6186 free_ldst_entry (ptr);
6187 ptr = pre_ldst_mems;
6192 /* Set the expression field if we are keeping it. */
6199 /* Show the world what we've found. */
6200 if (gcse_file && pre_ldst_mems != NULL)
6201 print_ldst_list (gcse_file);
6204 /* This routine will take an expression which we are replacing with
6205 a reaching register, and update any stores that are needed if
6206 that expression is in the ld_motion list. Stores are updated by
copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
6209 correct value in the reaching register for the loads. */
6212 update_ld_motion_stores (expr)
6215 struct ls_expr * mem_ptr;
6217 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
/* We can try to find just the REACHED stores, but it shouldn't
6220 matter to set the reaching reg everywhere... some might be
6221 dead and should be eliminated later. */
/* We replace  SET mem = expr  with
	SET reg = expr
	SET mem = reg , where reg is the
	reaching reg used in the load. */
6227 rtx list = mem_ptr->stores;
6229 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6231 rtx insn = XEXP (list, 0);
6232 rtx pat = PATTERN (insn);
6233 rtx src = SET_SRC (pat);
6234 rtx reg = expr->reaching_reg;
6237 /* If we've already copied it, continue. */
6238 if (expr->reaching_reg == src)
6243 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6244 print_rtl (gcse_file, expr->reaching_reg);
6245 fprintf (gcse_file, ":\n ");
6246 print_inline_rtx (gcse_file, insn, 8);
6247 fprintf (gcse_file, "\n");
copy = gen_move_insn (reg, SET_SRC (pat));
6251 new = emit_insn_before (copy, insn);
6252 record_one_set (REGNO (reg), new);
6253 SET_SRC (pat) = reg;
/* Un-recognize this pattern since it's probably different now. */
6256 INSN_CODE (insn) = -1;
6257 gcse_create_count++;
6262 /* Store motion code. */
6264 /* This is used to communicate the target bitvector we want to use in the
6265 reg_set_info routine when called via the note_stores mechanism. */
6266 static sbitmap * regvec;
6268 /* Used in computing the reverse edge graph bit vectors. */
6269 static sbitmap * st_antloc;
6271 /* Global holding the number of store expressions we are dealing with. */
6272 static int num_stores;
/* Check whether we need to mark a register set. Called via note_stores. */
6277 reg_set_info (dest, setter, data)
6278 rtx dest, setter ATTRIBUTE_UNUSED;
6279 void * data ATTRIBUTE_UNUSED;
6281 if (GET_CODE (dest) == SUBREG)
6282 dest = SUBREG_REG (dest);
6284 if (GET_CODE (dest) == REG)
6285 SET_BIT (*regvec, REGNO (dest));
6288 /* Return non-zero if the register operands of expression X are killed
6289 anywhere in basic block BB. */
6292 store_ops_ok (x, bb)
6300 /* Repeat is used to turn tail-recursion into iteration. */
6306 code = GET_CODE (x);
6310 /* If a reg has changed after us in this
6311 block, the operand has been killed. */
6312 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6340 i = GET_RTX_LENGTH (code) - 1;
6341 fmt = GET_RTX_FORMAT (code);
6347 rtx tem = XEXP (x, i);
6349 /* If we are about to do the last recursive call
6350 needed at this level, change it into iteration.
6351 This function is called enough to be worth it. */
6358 if (! store_ops_ok (tem, bb))
6361 else if (fmt[i] == 'E')
6365 for (j = 0; j < XVECLEN (x, i); j++)
6367 if (! store_ops_ok (XVECEXP (x, i, j), bb))
/* Determine whether INSN is a MEM store pattern that we will consider moving. */
6379 find_moveable_store (insn)
6382 struct ls_expr * ptr;
6383 rtx dest = PATTERN (insn);
6385 if (GET_CODE (dest) != SET
6386 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6389 dest = SET_DEST (dest);
6391 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6392 || GET_MODE (dest) == BLKmode)
6395 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
6398 if (rtx_varies_p (XEXP (dest, 0), 0))
6401 ptr = ldst_entry (dest);
6402 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6405 /* Perform store motion. Much like gcse, except we move expressions the
6406 other way by looking at the flowgraph in reverse. */
6409 compute_store_table ()
6415 max_gcse_regno = max_reg_num ();
6417 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
6419 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
6422 /* Find all the stores we care about. */
6423 for (bb = 0; bb < n_basic_blocks; bb++)
6425 regvec = & (reg_set_in_block[bb]);
6426 for (insn = BLOCK_END (bb);
6427 insn && insn != PREV_INSN (BLOCK_HEAD (bb));
6428 insn = PREV_INSN (insn))
6430 /* Ignore anything that is not a normal insn. */
6431 if (! INSN_P (insn))
6434 if (GET_CODE (insn) == CALL_INSN)
6436 bool clobbers_all = false;
6437 #ifdef NON_SAVING_SETJMP
6438 if (NON_SAVING_SETJMP
6439 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
6440 clobbers_all = true;
6443 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6445 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
6446 SET_BIT (reg_set_in_block[bb], regno);
6449 pat = PATTERN (insn);
6450 note_stores (pat, reg_set_info, NULL);
6452 /* Now that we've marked regs, look for stores. */
6453 if (GET_CODE (pat) == SET)
6454 find_moveable_store (insn);
6458 ret = enumerate_ldsts ();
6462 fprintf (gcse_file, "Store Motion Expressions.\n");
6463 print_ldst_list (gcse_file);
6469 /* Check to see if the load X is aliased with STORE_PATTERN. */
6472 load_kills_store (x, store_pattern)
6473 rtx x, store_pattern;
6475 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
6480 /* Go through the entire insn X, looking for any loads which might alias
6481 STORE_PATTERN. Return 1 if found. */
6484 find_loads (x, store_pattern)
6485 rtx x, store_pattern;
6494 if (GET_CODE (x) == SET)
6497 if (GET_CODE (x) == MEM)
6499 if (load_kills_store (x, store_pattern))
6503 /* Recursively process the insn. */
6504 fmt = GET_RTX_FORMAT (GET_CODE (x));
6506 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
6509 ret |= find_loads (XEXP (x, i), store_pattern);
6510 else if (fmt[i] == 'E')
6511 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6512 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
6517 /* Check if INSN kills the store pattern X (is aliased with it).
Return 1 if it does. */
6521 store_killed_in_insn (x, insn)
6524 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6527 if (GET_CODE (insn) == CALL_INSN)
6529 /* A normal or pure call might read from pattern,
6530 but a const call will not. */
6531 return ! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn);
6534 if (GET_CODE (PATTERN (insn)) == SET)
6536 rtx pat = PATTERN (insn);
6537 /* Check for memory stores to aliased objects. */
6538 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
/* Pretend it's a load and check for aliasing. */
6540 if (find_loads (SET_DEST (pat), x))
6542 return find_loads (SET_SRC (pat), x);
6545 return find_loads (PATTERN (insn), x);
6548 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6549 within basic block BB. */
6552 store_killed_after (x, insn, bb)
6561 /* Check if the register operands of the store are OK in this block.
6562 Note that if registers are changed ANYWHERE in the block, we'll
6563 decide we can't move it, regardless of whether it changed above
6564 or below the store. This could be improved by checking the register
operands while looking for aliasing in each insn. */
6566 if (!store_ops_ok (XEXP (x, 0), bb))
6569 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
6570 if (store_killed_in_insn (x, insn))
6576 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
6577 within basic block BB. */
6579 store_killed_before (x, insn, bb)
6583 rtx first = bb->head;
6586 return store_killed_in_insn (x, insn);
6588 /* Check if the register operands of the store are OK in this block.
6589 Note that if registers are changed ANYWHERE in the block, we'll
6590 decide we can't move it, regardless of whether it changed above
6591 or below the store. This could be improved by checking the register
operands while looking for aliasing in each insn. */
6593 if (!store_ops_ok (XEXP (x, 0), bb))
6596 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
6597 if (store_killed_in_insn (x, insn))
6603 #define ANTIC_STORE_LIST(x) ((x)->loads)
6604 #define AVAIL_STORE_LIST(x) ((x)->stores)
6606 /* Given the table of available store insns at the end of blocks,
6607 determine which ones are not killed by aliasing, and generate
6608 the appropriate vectors for gen and killed. */
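/* Informally, for a store expression S and block B:

	AE_GEN[B]    - B contains a store of S not killed later in B
	ST_ANTLOC[B] - B contains a store of S not killed earlier in B
	AE_KILL[B]   - some insn in B may load from or clobber S
	TRANSP[B]    - the complement of AE_KILL[B]  */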
6610 build_store_vectors ()
6615 struct ls_expr * ptr;
6617 /* Build the gen_vector. This is any store in the table which is not killed
6618 by aliasing later in its block. */
6619 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6620 sbitmap_vector_zero (ae_gen, n_basic_blocks);
6622 st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6623 sbitmap_vector_zero (st_antloc, n_basic_blocks);
6625 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
/* Put all the stores into either the antic list, or the avail list,
   or both. */
6629 rtx store_list = ptr->stores;
6630 ptr->stores = NULL_RTX;
6632 for (st = store_list; st != NULL; st = XEXP (st, 1))
6634 insn = XEXP (st, 0);
6635 bb = BLOCK_FOR_INSN (insn);
6637 if (!store_killed_after (ptr->pattern, insn, bb))
/* If we've already seen an available expression in this block,
   we can delete the one we saw already (it occurs earlier in
   the block) and replace it with this one.  We'll copy the
6642 old SRC expression to an unused register in case there
6643 are any side effects. */
6644 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6646 /* Find previous store. */
6648 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
6649 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
6653 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6655 fprintf (gcse_file, "Removing redundant store:\n");
6656 replace_store_insn (r, XEXP (st, 0), bb);
6657 XEXP (st, 0) = insn;
6661 SET_BIT (ae_gen[bb->index], ptr->index);
6662 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6663 AVAIL_STORE_LIST (ptr));
6666 if (!store_killed_before (ptr->pattern, insn, bb))
6668 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
6669 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6670 ANTIC_STORE_LIST (ptr));
6674 /* Free the original list of store insns. */
6675 free_INSN_LIST_list (&store_list);
  ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (ae_kill, n_basic_blocks);

  transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
  sbitmap_vector_zero (transp, n_basic_blocks);

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    for (b = 0; b < n_basic_blocks; b++)
      {
	if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
	  {
	    /* The anticipatable expression is not killed if it's gen'd.
	       We leave this check out for now.  If we have a code sequence
	       in a block which looks like:
		 ST MEMa = x
		 L     y = MEMa
		 ST MEMa = z
	       we should flag this as having an ANTIC expression, NOT
	       transparent, NOT killed, and AVAIL.
	       Unfortunately, since we haven't re-written all loads to
	       use the reaching reg, we'll end up doing an incorrect
	       load in the middle here if we push the store down.  It happens
	       in gcc.c-torture/execute/960311-1.c with -O3.
	       If we always kill it in this case, we'll sometimes do
	       unnecessary work, but it shouldn't actually hurt anything.
	       if (!TEST_BIT (ae_gen[b], ptr->index)).  */
	    SET_BIT (ae_kill[b], ptr->index);
	  }
	else
	  SET_BIT (transp[b], ptr->index);
      }

  /* Any block with no exits calls some non-returning function, so
     we'd better mark the store killed here, or we might not store to
     it at all.  If we knew it was abort, we wouldn't have to store,
     but we don't know that for sure.  */
  if (gcse_file)
    {
      fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
      print_ldst_list (gcse_file);
      dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
      dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
    }
}
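/* A small worked example (hedged; the exact bits depend on what
   store_killed_in_insn treats as a conflict).  For a diamond CFG

	      B1
	    /    \
	  B2      B3:  ST MEMa = z
	   |
	  L y = MEMa
	    \    /
	      B4

   with one tracked store "ST MEMa = z" in B3 and an aliasing load in B2,
   the vectors for that expression index would come out roughly as:

	st_antloc:  B3              (not clobbered before the store)
	ae_gen:     B3              (survives to the end of its block)
	ae_kill:    B2              (the aliasing load kills it there)
	transp:     at least B1, B4 (blocks that never touch MEMa)

   pre_edge_rev_lcm consumes these four vectors to decide where the
   store can be sunk to.  */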
/* Insert an instruction at the beginning of a basic block, and update
   the BLOCK_HEAD if needed.  */

static void
insert_insn_start_bb (insn, bb)
     rtx insn;
     basic_block bb;
{
  /* Insert at start of successor block.  */
  rtx prev = PREV_INSN (bb->head);
  rtx before = bb->head;

  while (before != 0)
    {
      if (GET_CODE (before) != CODE_LABEL
	  && (GET_CODE (before) != NOTE
	      || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
	break;
      prev = before;
      if (prev == bb->end)
	break;
      before = NEXT_INSN (before);
    }

  insn = emit_insn_after (insn, prev);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
	       bb->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }
}
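/* For illustration: in a block whose insn stream begins

	CODE_LABEL           L1
	NOTE                 NOTE_INSN_BASIC_BLOCK
	INSN                 first "real" insn

   the loop above walks PREV past the label and the basic block note,
   so the store is emitted between the note and the first real insn
   rather than in front of the label.  */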
/* This routine will insert a store on an edge.  EXPR is the ldst entry for
   the memory reference, and E is the edge to insert it on.  Returns non-zero
   if an edge insertion was performed.  */

static int
insert_store (expr, e)
     struct ls_expr * expr;
     edge e;
{
  rtx reg, insn;
  basic_block bb;
  edge tmp;

  /* We did all the deletes before this insert, so if we didn't delete a
     store, then we haven't set the reaching reg yet either.  */
  if (expr->reaching_reg == NULL_RTX)
    return 0;

  reg = expr->reaching_reg;
  insn = gen_move_insn (expr->pattern, reg);

  /* If we are inserting this expression on ALL predecessor edges of a BB,
     insert it at the start of the BB, and reset the insert bits on the other
     edges so we don't try to insert it on the other edges.  */
  bb = e->dest;
  for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
    {
      int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
      if (index == EDGE_INDEX_NO_EDGE)
	abort ();
      if (! TEST_BIT (pre_insert_map[index], expr->index))
	break;
    }

  /* If tmp is NULL, we found an insertion on every edge, blank the
     insertion vector for these edges, and insert at the start of the BB.  */
  if (!tmp && bb != EXIT_BLOCK_PTR)
    {
      for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
	{
	  int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
	  RESET_BIT (pre_insert_map[index], expr->index);
	}
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  /* We can't insert on this edge, so we'll insert at the head of the
     successor's block.  See Morgan, sec 10.5.  */
  if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
    {
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  insert_insn_on_edge (insn, e);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
	       e->src->index, e->dest->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  return 1;
}
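/* A sketch of the all-predecessor-edges case handled above (block
   numbers arbitrary).  If LCM requests the same store on every edge
   into B4,

	  B2      B3
	    \    /      <- pre_insert_map bit set on both edges
	     B4

   the first loop finds no incoming edge with the bit clear, so TMP ends
   up NULL; we then clear both bits and emit a single copy at the start
   of B4 instead of splitting both edges.  */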
/* This routine will replace a store with a SET to a specified register.  */

static void
replace_store_insn (reg, del, bb)
     rtx reg, del;
     basic_block bb;
{
  rtx insn;

  insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
  insn = emit_insn_after (insn, del);

  if (gcse_file)
    {
      fprintf (gcse_file,
	       "STORE_MOTION delete insn in BB %d:\n ", bb->index);
      print_inline_rtx (gcse_file, del, 6);
      fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  delete_insn (del);
}
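/* Illustration, in the MEMa notation used earlier: given a doomed store
   insn DEL of the form

	ST MEMa = x

   gen_move_insn (reg, SET_SRC (PATTERN (del))) builds

	reaching_reg = x

   which is emitted immediately after DEL before DEL is deleted, so any
   side effects of evaluating x are preserved and a later insert_store
   can store reaching_reg instead of recomputing x.  */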
/* Delete a store, but copy the value that would have been stored into
   the reaching_reg for later storing.  */

static void
delete_store (expr, bb)
     struct ls_expr * expr;
     basic_block bb;
{
  rtx reg, i, del;

  if (expr->reaching_reg == NULL_RTX)
    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));

  /* If there is more than 1 store, the earlier ones will be dead,
     but it doesn't hurt to replace them here.  */
  reg = expr->reaching_reg;

  for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
    {
      del = XEXP (i, 0);
      if (BLOCK_FOR_INSN (del) == bb)
	{
	  /* We know there is only one since we deleted redundant
	     ones during the available computation.  */
	  replace_store_insn (reg, del, bb);
	  break;
	}
    }
}
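/* Taken together with insert_store, the net rewrite is (sketch):

	before:                       after:
	  B2:  ST MEMa = x              B2:  reaching_reg = x
	  edge B2->B5: (nothing)        edge B2->B5: ST MEMa = reaching_reg

   delete_store performs the in-block half; insert_store later
   materializes the real store on the edges (or block starts) that
   pre_edge_rev_lcm selected.  */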
/* Free memory used by store motion.  */

static void
free_store_memory ()
{
  free_ldst_mems ();

  if (ae_gen)
    sbitmap_vector_free (ae_gen);
  if (ae_kill)
    sbitmap_vector_free (ae_kill);
  if (transp)
    sbitmap_vector_free (transp);
  if (st_antloc)
    sbitmap_vector_free (st_antloc);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);
  if (reg_set_in_block)
    sbitmap_vector_free (reg_set_in_block);

  ae_gen = ae_kill = transp = st_antloc = NULL;
  pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
}
/* Perform store motion.  Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.  */

static void
store_motion ()
{
  int x;
  struct ls_expr * ptr;
  int update_flow = 0;

  if (gcse_file)
    {
      fprintf (gcse_file, "before store motion\n");
      print_rtl (gcse_file, get_insns ());
    }

  init_alias_analysis ();

  /* Find all the stores that are live to the end of their block.  */
  num_stores = compute_store_table ();
  if (num_stores == 0)
    {
      sbitmap_vector_free (reg_set_in_block);
      end_alias_analysis ();
      return;
    }

  /* Now compute what's actually available to move.  */
  add_noreturn_fake_exit_edges ();
  build_store_vectors ();

  edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
				st_antloc, ae_kill, &pre_insert_map,
				&pre_delete_map);

  /* Now we want to insert the new stores which are going to be needed.  */
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      for (x = 0; x < n_basic_blocks; x++)
	if (TEST_BIT (pre_delete_map[x], ptr->index))
	  delete_store (ptr, BASIC_BLOCK (x));

      for (x = 0; x < NUM_EDGES (edge_list); x++)
	if (TEST_BIT (pre_insert_map[x], ptr->index))
	  update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
    }

  if (update_flow)
    commit_edge_insertions ();

  free_store_memory ();
  free_edge_list (edge_list);
  remove_fake_edges ();
  end_alias_analysis ();
}
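/* A hedged end-to-end example of what the pass above accomplishes: for
   a loop that stores the same location on every iteration,

	loop:  ST MEMa = x
	       x = x + 1
	       if (cond) goto loop

   the store inside the loop is replaced by "reaching_reg = x" via
   delete_store, and a single "ST MEMa = reaching_reg" is inserted on
   the loop-exit edge by insert_store, leaving only register traffic
   inside the loop body.  */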