1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
22 /* Must precede rtl.h for FFS. */
27 #include "hard-reg-set.h"
30 #include "insn-config.h"
35 /* The basic idea of common subexpression elimination is to go
36 through the code, keeping a record of expressions that would
37 have the same value at the current scan point, and replacing
38 expressions encountered with the cheapest equivalent expression.
40 It is too complicated to keep track of the different possibilities
41 when control paths merge; so, at each label, we forget all that is
42 known and start fresh. This can be described as processing each
43 basic block separately. Note, however, that these are not quite
44 the same as the basic blocks found by a later pass and used for
45 data flow analysis and register packing. We do not need to start fresh
46 after a conditional jump instruction if there is no label there.
48 We use two data structures to record the equivalent expressions:
49 a hash table for most expressions, and several vectors together
50 with "quantity numbers" to record equivalent (pseudo) registers.
52 The use of the special data structure for registers is desirable
53 because it is faster. It is possible because register references
54 contain a fairly small number, the register number, taken from
55 a contiguously allocated series, and two register references are
56 identical if they have the same number. General expressions
57 do not have any such thing, so the only way to retrieve the
58 information recorded on an expression other than a register
59 is to keep it in a hash table.
61 Registers and "quantity numbers":
63 At the start of each basic block, all of the (hardware and pseudo)
64 registers used in the function are given distinct quantity
65 numbers to indicate their contents. During scan, when the code
66 copies one register into another, we copy the quantity number.
67 When a register is loaded in any other way, we allocate a new
68 quantity number to describe the value generated by this operation.
69 `reg_qty' records what quantity a register is currently thought
72 All real quantity numbers are greater than or equal to `max_reg'.
73 If register N has not been assigned a quantity, reg_qty[N] will equal N.
75 Quantity numbers below `max_reg' do not exist and none of the `qty_...'
76 variables should be referenced with an index below `max_reg'.
78 We also maintain a bidirectional chain of registers for each
79 quantity number. `qty_first_reg', `qty_last_reg',
80 `reg_next_eqv' and `reg_prev_eqv' hold these chains.
82 The first register in a chain is the one whose lifespan is least local.
83 Among equals, it is the one that was seen first.
84 We replace any equivalent register with that one.
86 If two registers have the same quantity number, it must be true that
87 REG expressions with `qty_mode' must be in the hash table for both
88 registers and must be in the same class.
90 The converse is not true. Since hard registers may be referenced in
91 any mode, two REG expressions might be equivalent in the hash table
92 but not have the same quantity number if the quantity number of one
93 of the registers is not the same mode as those expressions.
95 Constants and quantity numbers
97 When a quantity has a known constant value, that value is stored
98 in the appropriate element of qty_const. This is in addition to
99 putting the constant in the hash table as is usual for non-regs.
101 Whether a reg or a constant is preferred is determined by the configuration
102 macro CONST_COSTS and will often depend on the constant value. In any
103 event, expressions containing constants can be simplified, by fold_rtx.
105 When a quantity has a known nearly constant value (such as an address
106 of a stack slot), that value is stored in the appropriate element
109 Integer constants don't have a machine mode. However, cse
110 determines the intended machine mode from the destination
111 of the instruction that moves the constant. The machine mode
112 is recorded in the hash table along with the actual RTL
113 constant expression so that different modes are kept separate.
117 To record known equivalences among expressions in general
118 we use a hash table called `table'. It has a fixed number of buckets
119 that contain chains of `struct table_elt' elements for expressions.
120 These chains connect the elements whose expressions have the same
123 Other chains through the same elements connect the elements which
124 currently have equivalent values.
126 Register references in an expression are canonicalized before hashing
127 the expression. This is done using `reg_qty' and `qty_first_reg'.
128 The hash code of a register reference is computed using the quantity
129 number, not the register number.
131 When the value of an expression changes, it is necessary to remove from the
132 hash table not just that expression but all expressions whose values
133 could be different as a result.
135 1. If the value changing is in memory, except in special cases
136 ANYTHING referring to memory could be changed. That is because
137 nobody knows where a pointer does not point.
138 The function `invalidate_memory' removes what is necessary.
140 The special cases are when the address is constant or is
141 a constant plus a fixed register such as the frame pointer
142 or a static chain pointer. When such addresses are stored in,
143 we can tell exactly which other such addresses must be invalidated
144 due to overlap. `invalidate' does this.
145 All expressions that refer to non-constant
146 memory addresses are also invalidated. `invalidate_memory' does this.
148 2. If the value changing is a register, all expressions
149 containing references to that register, and only those,
152 Because searching the entire hash table for expressions that contain
153 a register is very slow, we try to figure out when it isn't necessary.
154 Precisely, this is necessary only when expressions have been
155 entered in the hash table using this register, and then the value has
156 changed, and then another expression wants to be added to refer to
157 the register's new value. This sequence of circumstances is rare
158 within any one basic block.
160 The vectors `reg_tick' and `reg_in_table' are used to detect this case.
161 reg_tick[i] is incremented whenever a value is stored in register i.
162 reg_in_table[i] holds -1 if no references to register i have been
163 entered in the table; otherwise, it contains the value reg_tick[i] had
164 when the references were entered. If we want to enter a reference
165 and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
166 Until we want to enter a new entry, the mere fact that the two vectors
167 don't match makes the entries be ignored if anyone tries to match them.
169 Registers themselves are entered in the hash table as well as in
170 the equivalent-register chains. However, the vectors `reg_tick'
171 and `reg_in_table' do not apply to expressions which are simple
172 register references. These expressions are removed from the table
173 immediately when they become invalid, and this can be done even if
174 we do not immediately search for all the expressions that refer to
177 A CLOBBER rtx in an instruction invalidates its operand for further
178 reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
179 invalidates everything that resides in memory.
183 Constant expressions that differ only by an additive integer
184 are called related. When a constant expression is put in
185 the table, the related expression with no constant term
186 is also entered. These are made to point at each other
187 so that it is possible to find out if there exists any
188 register equivalent to an expression related to a given expression. */
190 /* One plus largest register number used in this function. */
194 /* Length of vectors indexed by quantity number.
195 We know in advance we will not need a quantity number this big. */
199 /* Next quantity number to be allocated.
200 This is 1 + the largest number needed so far. */
204 /* Indexed by quantity number, gives the first (or last) (pseudo) register
205 in the chain of registers that currently contain this quantity. */
/* Head and tail of each quantity's register chain; the links themselves
   live in `reg_next_eqv' and `reg_prev_eqv' below.  */
207 static int *qty_first_reg;
208 static int *qty_last_reg;
210 /* Index by quantity number, gives the mode of the quantity. */
212 static enum machine_mode *qty_mode;
214 /* Indexed by quantity number, gives the rtx of the constant value of the
215 quantity, or zero if it does not have a known value.
216 A sum of the frame pointer (or arg pointer) plus a constant
217 can also be entered here. */
219 static rtx *qty_const;
221 /* Indexed by qty number, gives the insn that stored the constant value
222 recorded in `qty_const'. */
224 static rtx *qty_const_insn;
226 /* The next three variables are used to track when a comparison between a
227 quantity and some constant or register has been passed. In that case, we
228 know the results of the comparison in case we see it again. These variables
229 record a comparison that is known to be true. */
231 /* Indexed by qty number, gives the rtx code of a comparison with a known
232 result involving this quantity. If none, it is UNKNOWN. */
233 static enum rtx_code *qty_comparison_code;
235 /* Indexed by qty number, gives the constant being compared against in a
236 comparison of known result. If no such comparison, it is undefined.
237 If the comparison is not with a constant, it is zero. */
239 static rtx *qty_comparison_const;
241 /* Indexed by qty number, gives the quantity being compared against in a
242 comparison of known result. If no such comparison, it is undefined.
243 If the comparison is not with a register, it is -1. */
245 static int *qty_comparison_qty;
248 /* For machines that have a CC0, we do not record its value in the hash
249 table since its use is guaranteed to be the insn immediately following
250 its definition and any other insn is presumed to invalidate it.
252 Instead, we store below the value last assigned to CC0. If it should
253 happen to be a constant, it is stored in preference to the actual
254 assigned value. In case it is a constant, we store the mode in which
255 the constant should be interpreted. */
/* Last value assigned to CC0, and the mode in which a constant value
   should be interpreted (see the comment above).  */
257 static rtx prev_insn_cc0;
258 static enum machine_mode prev_insn_cc0_mode;
261 /* Previous actual insn. 0 if at first insn of basic block. */
263 static rtx prev_insn;
265 /* Insn being scanned. */
267 static rtx this_insn;
269 /* Index by (pseudo) register number, gives the quantity number
270 of the register's current contents. */
274 /* Index by (pseudo) register number, gives the number of the next (or
275 previous) (pseudo) register in the chain of registers sharing the same
278 Or -1 if this register is at the end of the chain.
280 If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
282 static int *reg_next_eqv;
283 static int *reg_prev_eqv;
285 /* Index by (pseudo) register number, gives the number of times
286 that register has been altered in the current basic block. */
288 static int *reg_tick;
290 /* Index by (pseudo) register number, gives the reg_tick value at which
291 rtx's containing this register are valid in the hash table.
292 If this does not equal the current reg_tick value, such expressions
293 existing in the hash table are invalid.
294 If this is -1, no expressions containing this register have been
295 entered in the table. */
297 static int *reg_in_table;
299 /* A HARD_REG_SET containing all the hard registers for which there is
300 currently a REG expression in the hash table. Note the difference
301 from the above variables, which indicate if the REG is mentioned in some
302 expression in the table. */
304 static HARD_REG_SET hard_regs_in_table;
306 /* A HARD_REG_SET containing all the hard registers that are invalidated
/* (i.e. clobbered across a function call, per the variable's name --
   the rest of the original comment is not visible here).  */
309 static HARD_REG_SET regs_invalidated_by_call;
311 /* Two vectors of ints:
312 one containing max_reg -1's; the other max_reg + 500 (an approximation
313 for max_qty) elements where element i contains i.
314 These are used to initialize various other vectors fast. */
316 static int *all_minus_one;
317 static int *consec_ints;
319 /* CUID of insn that starts the basic block currently being cse-processed. */
321 static int cse_basic_block_start;
323 /* CUID of insn that ends the basic block currently being cse-processed. */
325 static int cse_basic_block_end;
327 /* Vector mapping INSN_UIDs to cuids.
328 The cuids are like uids but increase monotonically always.
329 We use them to see whether a reg is used outside a given basic block. */
331 static int *uid_cuid;
333 /* Highest UID in UID_CUID. */
336 /* Get the cuid of an insn. */
338 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
340 /* Nonzero if cse has altered conditional jump insns
341 in such a way that jump optimization should be redone. */
343 static int cse_jumps_altered;
/* The following three flags are set as side effects of canon_hash,
   as described in the comments on each.  */
345 /* canon_hash stores 1 in do_not_record
346 if it notices a reference to CC0, PC, or some other volatile
349 static int do_not_record;
351 /* canon_hash stores 1 in hash_arg_in_memory
352 if it notices a reference to memory within the expression being hashed. */
354 static int hash_arg_in_memory;
356 /* canon_hash stores 1 in hash_arg_in_struct
357 if it notices a reference to memory that's part of a structure. */
359 static int hash_arg_in_struct;
361 /* The hash table contains buckets which are chains of `struct table_elt's,
362 each recording one expression's information.
363 That expression is in the `exp' field.
365 Those elements with the same hash code are chained in both directions
366 through the `next_same_hash' and `prev_same_hash' fields.
368 Each set of expressions with equivalent values
369 are on a two-way chain through the `next_same_value'
370 and `prev_same_value' fields, and all point with
371 the `first_same_value' field at the first element in
372 that chain. The chain is in order of increasing cost.
373 Each element's cost value is in its `cost' field.
375 The `in_memory' field is nonzero for elements that
376 involve any reference to memory. These elements are removed
377 whenever a write is done to an unidentified location in memory.
378 To be safe, we assume that a memory address is unidentified unless
379 the address is either a symbol constant or a constant plus
380 the frame pointer or argument pointer.
382 The `in_struct' field is nonzero for elements that
383 involve any reference to memory inside a structure or array.
385 The `related_value' field is used to connect related expressions
386 (that differ by adding an integer).
387 The related expressions are chained in a circular fashion.
388 `related_value' is zero for expressions for which this
391 The `cost' field stores the cost of this element's expression.
393 The `is_const' flag is set if the element is a constant (including
396 The `flag' field is used as a temporary during some search routines.
398 The `mode' field is usually the same as GET_MODE (`exp'), but
399 if `exp' is a CONST_INT and has no machine mode then the `mode'
400 field is the mode it was being used as. Each constant is
401 recorded separately for each mode it is used with. */
/* Doubly-linked same-hash chain (see comment above).  */
407 struct table_elt *next_same_hash;
408 struct table_elt *prev_same_hash;
/* Doubly-linked same-value chain, ordered by increasing cost;
   `first_same_value' points at the cheapest (first) member.  */
409 struct table_elt *next_same_value;
410 struct table_elt *prev_same_value;
411 struct table_elt *first_same_value;
/* Circular chain of expressions differing only by an added integer.  */
412 struct table_elt *related_value;
414 enum machine_mode mode;
423 /* We don't want a lot of buckets, because we rarely have very many
424 things stored in the hash table, and a lot of buckets slows
425 down a lot of loops that happen frequently. */
428 /* Compute hash code of X in mode M. Special-case case where X is a pseudo
429 register (hard registers may require `do_not_record' to be set). */
432 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
433 ? ((((int) REG << 7) + reg_qty[REGNO (X)]) % NBUCKETS) \
434 : canon_hash (X, M) % NBUCKETS)
436 /* Determine whether register number N is considered a fixed register for CSE.
437 It is desirable to replace other regs with fixed regs, to reduce need for
439 A reg wins if it is either the frame pointer or designated as fixed,
440 but not if it is an overlapping register. */
441 #ifdef OVERLAPPING_REGNO_P
442 #define FIXED_REGNO_P(N) \
443 (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
445 && ! OVERLAPPING_REGNO_P ((N)))
447 #define FIXED_REGNO_P(N) \
448 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
452 /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
453 hard registers and pointers into the frame are the cheapest with a cost
454 of 0. Next come pseudos with a cost of one and other hard registers with
455 a cost of 2. Aside from these special cases, call `rtx_cost'. */
457 #define CHEAP_REG(N) \
458 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
459 || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
460 || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
461 || ((N) < FIRST_PSEUDO_REGISTER \
462 && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
465 (GET_CODE (X) == REG \
466 ? (CHEAP_REG (REGNO (X)) ? 0 \
467 : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
469 : rtx_cost (X, SET) * 2)
471 /* Determine if the quantity number for register X represents a valid index
472 into the `qty_...' variables. */
/* True when register N has been assigned a real quantity number
   (reg_qty[N] == N means "no quantity"; see the comments at top of file).  */
474 #define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
/* The hash table itself: an array of bucket-head pointers.  */
476 static struct table_elt *table[NBUCKETS];
478 /* Chain of `struct table_elt's made so far for this function
479 but currently removed from the table. */
481 static struct table_elt *free_element_chain;
483 /* Number of `struct table_elt' structures made so far for this function. */
485 static int n_elements_made;
487 /* Maximum value `n_elements_made' has had so far in this compilation
488 for functions previously processed. */
490 static int max_elements_made;
492 /* Surviving equivalence class when two equivalence classes are merged
493 by recording the effects of a jump in the last insn. Zero if the
494 last insn was not a conditional jump. */
496 static struct table_elt *last_jump_equiv_class;
498 /* Set to the cost of a constant pool reference if one was found for a
499 symbolic constant. If this was found, it means we should try to
500 convert constants into constant pool entries if they don't fit in
503 static int constant_pool_entries_cost;
505 /* Bits describing what kind of values in memory must be invalidated
506 for a particular instruction. If all three bits are zero,
507 no memory refs need to be invalidated. Each bit is more powerful
508 than the preceding ones, and if a bit is set then the preceding
511 Here is how the bits are set:
512 Pushing onto the stack invalidates only the stack pointer,
513 writing at a fixed address invalidates only variable addresses,
514 writing in a structure element at variable address
515 invalidates all but scalar variables,
516 and writing in anything else at variable address invalidates everything. */
520 int sp : 1; /* Invalidate stack pointer. */
521 int var : 1; /* Invalidate variable addresses. */
522 int nonscalar : 1; /* Invalidate all but scalar variables. */
523 int all : 1; /* Invalidate all memory refs. */
526 /* Define maximum length of a branch path. */
528 #define PATHLENGTH 10
530 /* This data describes a block that will be processed by cse_basic_block. */
532 struct cse_basic_block_data {
533 /* Lowest CUID value of insns in block. */
535 /* Highest CUID value of insns in block. */
537 /* Total number of SETs in block. */
539 /* Last insn in the block. */
541 /* Size of current branch path, if any. */
543 /* Current branch path, indicating which branches will be taken. */
545 /* The branch insn. */
547 /* Whether it should be taken or not. AROUND is the same as taken
548 except that it is used when the destination label is not preceded
550 enum taken {TAKEN, NOT_TAKEN, AROUND} status;
554 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
555 virtual regs here because the simplify_*_operation routines are called
556 by integrate.c, which is called before virtual register instantiation. */
558 #define FIXED_BASE_PLUS_P(X) \
559 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
560 || (X) == arg_pointer_rtx \
561 || (X) == virtual_stack_vars_rtx \
562 || (X) == virtual_incoming_args_rtx \
563 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
564 && (XEXP (X, 0) == frame_pointer_rtx \
565 || XEXP (X, 0) == hard_frame_pointer_rtx \
566 || XEXP (X, 0) == arg_pointer_rtx \
567 || XEXP (X, 0) == virtual_stack_vars_rtx \
568 || XEXP (X, 0) == virtual_incoming_args_rtx)))
570 /* Similar, but also allows reference to the stack pointer.
572 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
573 arg_pointer_rtx by itself is nonzero, because on at least one machine,
574 the i960, the arg pointer is zero when it is unused. */
576 #define NONZERO_BASE_PLUS_P(X) \
577 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
578 || (X) == virtual_stack_vars_rtx \
579 || (X) == virtual_incoming_args_rtx \
580 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
581 && (XEXP (X, 0) == frame_pointer_rtx \
582 || XEXP (X, 0) == hard_frame_pointer_rtx \
583 || XEXP (X, 0) == arg_pointer_rtx \
584 || XEXP (X, 0) == virtual_stack_vars_rtx \
585 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
586 || (X) == stack_pointer_rtx \
587 || (X) == virtual_stack_dynamic_rtx \
588 || (X) == virtual_outgoing_args_rtx \
589 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
590 && (XEXP (X, 0) == stack_pointer_rtx \
591 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
592 || XEXP (X, 0) == virtual_outgoing_args_rtx)))
/* Forward declarations for the static routines defined below
   (PROTO is the pre-ANSI-compatible prototype macro).  */
594 static void new_basic_block PROTO((void));
595 static void make_new_qty PROTO((int));
596 static void make_regs_eqv PROTO((int, int));
597 static void delete_reg_equiv PROTO((int));
598 static int mention_regs PROTO((rtx));
599 static int insert_regs PROTO((rtx, struct table_elt *, int));
600 static void free_element PROTO((struct table_elt *));
601 static void remove_from_table PROTO((struct table_elt *, int));
602 static struct table_elt *get_element PROTO((void));
603 static struct table_elt *lookup PROTO((rtx, int, enum machine_mode)),
604 *lookup_for_remove PROTO((rtx, int, enum machine_mode));
605 static rtx lookup_as_function PROTO((rtx, enum rtx_code));
606 static struct table_elt *insert PROTO((rtx, struct table_elt *, int,
608 static void merge_equiv_classes PROTO((struct table_elt *,
609 struct table_elt *));
610 static void invalidate PROTO((rtx));
611 static void remove_invalid_refs PROTO((int));
612 static void rehash_using_reg PROTO((rtx));
613 static void invalidate_memory PROTO((struct write_data *));
614 static void invalidate_for_call PROTO((void));
615 static rtx use_related_value PROTO((rtx, struct table_elt *));
616 static int canon_hash PROTO((rtx, enum machine_mode));
617 static int safe_hash PROTO((rtx, enum machine_mode));
618 static int exp_equiv_p PROTO((rtx, rtx, int, int));
619 static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
622 static int refers_to_p PROTO((rtx, rtx));
623 static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
625 static int cse_rtx_addr_varies_p PROTO((rtx));
626 static rtx canon_reg PROTO((rtx, rtx));
627 static void find_best_addr PROTO((rtx, rtx *));
628 static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
630 enum machine_mode *));
631 static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
633 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
635 static rtx fold_rtx PROTO((rtx, rtx));
636 static rtx equiv_constant PROTO((rtx));
637 static void record_jump_equiv PROTO((rtx, int));
638 static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
640 static void cse_insn PROTO((rtx, int));
641 static void note_mem_written PROTO((rtx, struct write_data *));
642 static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
643 static rtx cse_process_notes PROTO((rtx, rtx));
644 static void cse_around_loop PROTO((rtx));
645 static void invalidate_skipped_set PROTO((rtx, rtx));
646 static void invalidate_skipped_block PROTO((rtx));
647 static void cse_check_loop_start PROTO((rtx, rtx));
648 static void cse_set_around_loop PROTO((rtx, rtx, rtx));
649 static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
650 static void count_reg_usage PROTO((rtx, int *, int));
652 /* Return an estimate of the cost of computing rtx X.
653 One use is in cse, to decide which expression to keep in the hash table.
654 Another is in rtl generation, to pick the cheapest way to multiply.
655 Other uses like the latter are expected in the future. */
657 /* Return the right cost to give to an operation
658 to make the cost of the corresponding register-to-register instruction
659 N times that of a fast register-to-register instruction. */
661 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
/* X is the expression to cost; OUTER_CODE is the rtx code of X's
   containing expression, passed through to the target cost macros
   RTX_COSTS and CONST_COSTS below.  */
664 rtx_cost (x, outer_code)
666 enum rtx_code outer_code;
669 register enum rtx_code code;
676 /* Compute the default costs of certain things.
677 Note that RTX_COSTS can override the defaults. */
683 /* Count multiplication by 2**n as a shift,
684 because if we are considering it, we would output it as a shift. */
685 if (GET_CODE (XEXP (x, 1)) == CONST_INT
686 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
689 total = COSTS_N_INSNS (5);
695 total = COSTS_N_INSNS (7);
698 /* Used in loop.c and combine.c as a marker. */
702 /* We don't want these to be used in substitutions because
703 we have no way of validating the resulting insn. So assign
704 anything containing an ASM_OPERANDS a very high cost. */
/* A cheap register costs 0; any other register costs 1.  */
714 return ! CHEAP_REG (REGNO (x));
717 /* If we can't tie these modes, make this expensive. The larger
718 the mode, the more expensive it is. */
719 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
720 return COSTS_N_INSNS (2
721 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
724 RTX_COSTS (x, code, outer_code);
726 CONST_COSTS (x, code, outer_code);
729 /* Sum the costs of the sub-rtx's, plus cost of this operation,
730 which is already in total. */
732 fmt = GET_RTX_FORMAT (code);
733 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
735 total += rtx_cost (XEXP (x, i), code);
736 else if (fmt[i] == 'E')
737 for (j = 0; j < XVECLEN (x, i); j++)
738 total += rtx_cost (XVECEXP (x, i, j), code);
743 /* Clear the hash table and initialize each register with its own quantity,
744 for a new basic block. */
753 bzero (reg_tick, max_reg * sizeof (int));
/* reg_in_table gets all -1 (nothing in table); reg_qty gets the identity
   mapping (each register its own quantity).  */
755 bcopy (all_minus_one, reg_in_table, max_reg * sizeof (int));
756 bcopy (consec_ints, reg_qty, max_reg * sizeof (int));
757 CLEAR_HARD_REG_SET (hard_regs_in_table);
759 /* The per-quantity values used to be initialized here, but it is
760 much faster to initialize each as it is made in `make_new_qty'. */
/* Traverse every bucket chain, saving each element's successor before
   the element itself is processed.  */
762 for (i = 0; i < NBUCKETS; i++)
764 register struct table_elt *this, *next;
765 for (this = table[i]; this; this = next)
767 next = this->next_same_hash;
772 bzero (table, sizeof table);
781 /* Say that register REG contains a quantity not in any register before
782 and initialize that quantity. */
790 if (next_qty >= max_qty)
/* Allocate the next quantity number and make REG its only member.  */
793 q = reg_qty[reg] = next_qty++;
794 qty_first_reg[q] = reg;
795 qty_last_reg[q] = reg;
796 qty_const[q] = qty_const_insn[q] = 0;
797 qty_comparison_code[q] = UNKNOWN;
/* REG is both head and tail of its chain, so it has no neighbors.  */
799 reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
802 /* Make reg NEW equivalent to reg OLD.
803 OLD is not changing; NEW is. */
806 make_regs_eqv (new, old)
807 register int new, old;
809 register int lastr, firstr;
810 register int q = reg_qty[old];
812 /* Nothing should become eqv until it has a "non-invalid" qty number. */
813 if (! REGNO_QTY_VALID_P (old))
817 firstr = qty_first_reg[q];
818 lastr = qty_last_reg[q];
820 /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
821 hard regs. Among pseudos, if NEW will live longer than any other reg
822 of the same qty, and that is beyond the current basic block,
823 make it the new canonical replacement for this qty. */
824 if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
825 /* Certain fixed registers might be of the class NO_REGS. This means
826 that not only can they not be allocated by the compiler, but
827 they cannot be used in substitutions or canonicalizations
829 && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
830 && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
831 || (new >= FIRST_PSEUDO_REGISTER
832 && (firstr < FIRST_PSEUDO_REGISTER
833 || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
834 || (uid_cuid[regno_first_uid[new]]
835 < cse_basic_block_start))
836 && (uid_cuid[regno_last_uid[new]]
837 > uid_cuid[regno_last_uid[firstr]]))))))
/* NEW becomes the new head of the chain for quantity Q.  */
839 reg_prev_eqv[firstr] = new;
840 reg_next_eqv[new] = firstr;
841 reg_prev_eqv[new] = -1;
842 qty_first_reg[q] = new;
846 /* If NEW is a hard reg (known to be non-fixed), insert at end.
847 Otherwise, insert before any non-fixed hard regs that are at the
848 end. Registers of class NO_REGS cannot be used as an
849 equivalent for anything. */
850 while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
851 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
852 && new >= FIRST_PSEUDO_REGISTER)
853 lastr = reg_prev_eqv[lastr];
/* Splice NEW into the chain immediately after LASTR, updating the
   tail pointer when LASTR was the last element.  */
854 reg_next_eqv[new] = reg_next_eqv[lastr];
855 if (reg_next_eqv[lastr] >= 0)
856 reg_prev_eqv[reg_next_eqv[lastr]] = new;
858 qty_last_reg[q] = new;
859 reg_next_eqv[lastr] = new;
860 reg_prev_eqv[new] = lastr;
864 /* Remove REG from its equivalence class. */
867 delete_reg_equiv (reg)
870 register int q = reg_qty[reg];
873 /* If invalid, do nothing. */
/* P and N are REG's previous and next neighbors in the chain;
   unlink REG by reconnecting them to each other.  */
877 p = reg_prev_eqv[reg];
878 n = reg_next_eqv[reg];
/* REG was the chain head, so N becomes the new first register.  */
887 qty_first_reg[q] = n;
892 /* Remove any invalid expressions from the hash table
893 that refer to any of the registers contained in expression X.
895 Make sure that newly inserted references to those registers
896 as subexpressions will be considered valid.
898 mention_regs is not called when a register itself
899 is being stored in the table.
901 Return 1 if we have done something that may have changed the hash code
908 register enum rtx_code code;
911 register int changed = 0;
/* For a hard register, ENDREGNO covers every hard register the REG
   occupies in its mode; a pseudo always occupies exactly one.  */
919 register int regno = REGNO (x);
920 register int endregno
921 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
922 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
925 for (i = regno; i < endregno; i++)
927 if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
928 remove_invalid_refs (i);
930 reg_in_table[i] = reg_tick[i];
936 /* If X is a comparison or a COMPARE and either operand is a register
937 that does not have a quantity, give it one. This is so that a later
938 call to record_jump_equiv won't cause X to be assigned a different
939 hash code and not found in the table after that call.
941 It is not necessary to do this here, since rehash_using_reg can
942 fix up the table later, but doing this here eliminates the need to
943 call that expensive function in the most common case where the only
944 use of the register is in the comparison. */
946 if (code == COMPARE || GET_RTX_CLASS (code) == '<')
948 if (GET_CODE (XEXP (x, 0)) == REG
949 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
950 if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
952 rehash_using_reg (XEXP (x, 0));
956 if (GET_CODE (XEXP (x, 1)) == REG
957 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
958 if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
960 rehash_using_reg (XEXP (x, 1));
/* Recurse over X's operands, accumulating whether any hash code
   may have changed.  */
965 fmt = GET_RTX_FORMAT (code);
966 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
968 changed |= mention_regs (XEXP (x, i));
969 else if (fmt[i] == 'E')
970 for (j = 0; j < XVECLEN (x, i); j++)
971 changed |= mention_regs (XVECEXP (x, i, j));
976 /* Update the register quantities for inserting X into the hash table
977 with a value equivalent to CLASSP.
978 (If the class does not contain a REG, it is irrelevant.)
979 If MODIFIED is nonzero, X is a destination; it is being modified.
980 Note that delete_reg_equiv should be called on a register
981 before insert_regs is done on that register with MODIFIED != 0.
983 Nonzero value means that elements of reg_qty have changed
984 so X's hash code may be different. */
987 insert_regs (x, classp, modified)
989 struct table_elt *classp;
992 if (GET_CODE (x) == REG)
994 register int regno = REGNO (x);
996 /* If REGNO is in the equivalence table already but is of the
997 wrong mode for that equivalence, don't do anything here. */
999 if (REGNO_QTY_VALID_P (regno)
1000 && qty_mode[reg_qty[regno]] != GET_MODE (x))
1003 if (modified || ! REGNO_QTY_VALID_P (regno))
/* Search CLASSP's class for a same-mode register; if one exists, put
   REGNO into that register's quantity instead of making a new one. */
1006 for (classp = classp->first_same_value;
1008 classp = classp->next_same_value)
1009 if (GET_CODE (classp->exp) == REG
1010 && GET_MODE (classp->exp) == GET_MODE (x))
1012 make_regs_eqv (regno, REGNO (classp->exp));
/* No equivalent register found: give REGNO a brand-new quantity. */
1016 make_new_qty (regno);
1017 qty_mode[reg_qty[regno]] = GET_MODE (x);
1024 /* If X is a SUBREG, we will likely be inserting the inner register in the
1025 table. If that register doesn't have an assigned quantity number at
1026 this point but does later, the insertion that we will be doing now will
1027 not be accessible because its hash code will have changed. So assign
1028 a quantity number now. */
1030 else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1031 && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
1033 insert_regs (SUBREG_REG (x), NULL_PTR, 0);
1034 mention_regs (SUBREG_REG (x));
/* Not a REG or SUBREG case: delegate; mention_regs's return value is
   this function's "hash may have changed" indication. */
1038 return mention_regs (x);
1041 /* Look in or update the hash table. */
1043 /* Put the element ELT on the list of free elements. */
/* Push ELT onto the singly-linked free list, reusing the
   next_same_hash link field as the list pointer.
   NOTE(review): the function header line is elided in this extract;
   in GCC's cse.c this is `free_element'. */
1047 struct table_elt *elt;
1049 elt->next_same_hash = free_element_chain;
1050 free_element_chain = elt;
1053 /* Return an element that is free for use. */
1055 static struct table_elt *
/* Pop from the free-element chain if it is nonempty; otherwise fall
   through and allocate a fresh element on the obstack.
   NOTE(review): the name line and the non-empty test are elided here. */
1058 struct table_elt *elt = free_element_chain;
1061 free_element_chain = elt->next_same_hash;
1065 return (struct table_elt *) oballoc (sizeof (struct table_elt));
1068 /* Remove table element ELT from use in the table.
1069 HASH is its hash code, made using the HASH macro.
1070 It's an argument because often that is known in advance
1071 and we save much time not recomputing it. */
1074 remove_from_table (elt, hash)
1075 register struct table_elt *elt;
1081 /* Mark this element as removed. See cse_insn. */
1082 elt->first_same_value = 0;
1084 /* Remove the table element from its equivalence class. */
1087 register struct table_elt *prev = elt->prev_same_value;
1088 register struct table_elt *next = elt->next_same_value;
1090 if (next) next->prev_same_value = prev;
1093 prev->next_same_value = next;
/* ELT was the head of its class: the successor becomes the new
   first_same_value for every remaining member. */
1096 register struct table_elt *newfirst = next;
1099 next->first_same_value = newfirst;
1100 next = next->next_same_value;
1105 /* Remove the table element from its hash bucket. */
1108 register struct table_elt *prev = elt->prev_same_hash;
1109 register struct table_elt *next = elt->next_same_hash;
1111 if (next) next->prev_same_hash = prev;
1114 prev->next_same_hash = next;
1115 else if (table[hash] == elt)
1119 /* This entry is not in the proper hash bucket. This can happen
1120 when two classes were merged by `merge_equiv_classes'. Search
1121 for the hash bucket that it heads. This happens only very
1122 rarely, so the cost is acceptable. */
1123 for (hash = 0; hash < NBUCKETS; hash++)
1124 if (table[hash] == elt)
1129 /* Remove the table element from its related-value circular chain. */
1131 if (elt->related_value != 0 && elt->related_value != elt)
1133 register struct table_elt *p = elt->related_value;
1134 while (p->related_value != elt)
1135 p = p->related_value;
1136 p->related_value = elt->related_value;
1137 if (p->related_value == p)
1138 p->related_value = 0;
/* NOTE(review): the final return of ELT to the free list (via
   free_element) is on a line elided from this extract. */
1144 /* Look up X in the hash table and return its table element,
1145 or 0 if X is not in the table.
1147 MODE is the machine-mode of X, or if X is an integer constant
1148 with VOIDmode then MODE is the mode with which X will be used.
1150 Here we are satisfied to find an expression whose tree structure
1153 static struct table_elt *
1154 lookup (x, hash, mode)
1157 enum machine_mode mode;
1159 register struct table_elt *p;
/* Walk the bucket chain: a REG matches only by pointer identity (or
   validated equivalence); other rtx match by structural equivalence,
   with register validation enabled for non-REG X. */
1161 for (p = table[hash]; p; p = p->next_same_hash)
1162 if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
1163 || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
1169 /* Like `lookup' but don't care whether the table element uses invalid regs.
1170 Also ignore discrepancies in the machine mode of a register. */
1172 static struct table_elt *
1173 lookup_for_remove (x, hash, mode)
1176 enum machine_mode mode;
1178 register struct table_elt *p;
1180 if (GET_CODE (x) == REG)
1182 int regno = REGNO (x);
1183 /* Don't check the machine mode when comparing registers;
1184 invalidating (REG:SI 0) also invalidates (REG:DF 0). */
1185 for (p = table[hash]; p; p = p->next_same_hash)
1186 if (GET_CODE (p->exp) == REG
1187 && REGNO (p->exp) == regno)
/* Non-register X: match on mode plus identity or unvalidated
   structural equivalence (validate == 0). */
1192 for (p = table[hash]; p; p = p->next_same_hash)
1193 if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
1200 /* Look for an expression equivalent to X and with code CODE.
1201 If one is found, return that expression. */
1204 lookup_as_function (x, code)
1208 register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
/* Scan X's whole equivalence class for a *valid* entry whose top-level
   rtx code is CODE. */
1213 for (p = p->first_same_value; p; p = p->next_same_value)
1215 if (GET_CODE (p->exp) == code
1216 /* Make sure this is a valid entry in the table. */
1217 && exp_equiv_p (p->exp, p->exp, 1, 0))
1224 /* Insert X in the hash table, assuming HASH is its hash code
1225 and CLASSP is an element of the class it should go in
1226 (or 0 if a new class should be made).
1227 It is inserted at the proper position to keep the class in
1228 the order cheapest first.
1230 MODE is the machine-mode of X, or if X is an integer constant
1231 with VOIDmode then MODE is the mode with which X will be used.
1233 For elements of equal cheapness, the most recent one
1234 goes in front, except that the first element in the list
1235 remains first unless a cheaper element is added. The order of
1236 pseudo-registers does not matter, as canon_reg will be called to
1237 find the cheapest when a register is retrieved from the table.
1239 The in_memory field in the hash table element is set to 0.
1240 The caller must set it nonzero if appropriate.
1242 You should call insert_regs (X, CLASSP, MODIFY) before calling here,
1243 and if insert_regs returns a nonzero value
1244 you must then recompute its hash code before calling here.
1246 If necessary, update table showing constant values of quantities. */
1248 #define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
1250 static struct table_elt *
1251 insert (x, classp, hash, mode)
1253 register struct table_elt *classp;
1255 enum machine_mode mode;
1257 register struct table_elt *elt;
1259 /* If X is a register and we haven't made a quantity for it,
1260 something is wrong. */
1261 if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
1264 /* If X is a hard register, show it is being put in the table. */
1265 if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
1267 int regno = REGNO (x);
1268 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1271 for (i = regno; i < endregno; i++)
1272 SET_HARD_REG_BIT (hard_regs_in_table, i);
1276 /* Put an element for X into the right hash bucket. */
1278 elt = get_element ();
1280 elt->cost = COST (x);
1281 elt->next_same_value = 0;
1282 elt->prev_same_value = 0;
1283 elt->next_same_hash = table[hash];
1284 elt->prev_same_hash = 0;
1285 elt->related_value = 0;
1288 elt->is_const = (CONSTANT_P (x)
1289 /* GNU C++ takes advantage of this for `this'
1290 (and other const values). */
1291 || (RTX_UNCHANGING_P (x)
1292 && GET_CODE (x) == REG
1293 && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1294 || FIXED_BASE_PLUS_P (x));
/* Link ELT in as the new head of its hash bucket. */
1297 table[hash]->prev_same_hash = elt;
1300 /* Put it into the proper value-class. */
1303 classp = classp->first_same_value;
1304 if (CHEAPER (elt, classp))
1305 /* Insert at the head of the class */
1307 register struct table_elt *p;
1308 elt->next_same_value = classp;
1309 classp->prev_same_value = elt;
1310 elt->first_same_value = elt;
/* Every member's first_same_value must now name the new head. */
1312 for (p = classp; p; p = p->next_same_value)
1313 p->first_same_value = elt;
1317 /* Insert not at head of the class. */
1318 /* Put it after the last element cheaper than X. */
1319 register struct table_elt *p, *next;
1320 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
1322 /* Put it after P and before NEXT. */
1323 elt->next_same_value = next;
1325 next->prev_same_value = elt;
1326 elt->prev_same_value = p;
1327 p->next_same_value = elt;
1328 elt->first_same_value = classp;
/* No class supplied: X starts a new one-element class. */
1332 elt->first_same_value = elt;
1334 /* If this is a constant being set equivalent to a register or a register
1335 being set equivalent to a constant, note the constant equivalence.
1337 If this is a constant, it cannot be equivalent to a different constant,
1338 and a constant is the only thing that can be cheaper than a register. So
1339 we know the register is the head of the class (before the constant was
1342 If this is a register that is not already known equivalent to a
1343 constant, we must check the entire class.
1345 If this is a register that is already known equivalent to an insn,
1346 update `qty_const_insn' to show that `this_insn' is the latest
1347 insn making that quantity equivalent to the constant. */
1349 if (elt->is_const && classp && GET_CODE (classp->exp) == REG)
1351 qty_const[reg_qty[REGNO (classp->exp)]]
1352 = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
1353 qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
1356 else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]])
1358 register struct table_elt *p;
/* Scan the class for a constant member to record for X's quantity. */
1360 for (p = classp; p != 0; p = p->next_same_value)
1364 qty_const[reg_qty[REGNO (x)]]
1365 = gen_lowpart_if_possible (GET_MODE (x), p->exp);
1366 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1372 else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
1373 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
1374 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1376 /* If this is a constant with symbolic value,
1377 and it has a term with an explicit integer value,
1378 link it up with related expressions. */
1379 if (GET_CODE (x) == CONST)
1381 rtx subexp = get_related_value (x);
1383 struct table_elt *subelt, *subelt_prev;
1387 /* Get the integer-free subexpression in the hash table. */
1388 subhash = safe_hash (subexp, mode) % NBUCKETS;
1389 subelt = lookup (subexp, subhash, mode);
1391 subelt = insert (subexp, NULL_PTR, subhash, mode);
1392 /* Initialize SUBELT's circular chain if it has none. */
1393 if (subelt->related_value == 0)
1394 subelt->related_value = subelt;
1395 /* Find the element in the circular chain that precedes SUBELT. */
1396 subelt_prev = subelt;
1397 while (subelt_prev->related_value != subelt)
1398 subelt_prev = subelt_prev->related_value;
1399 /* Put new ELT into SUBELT's circular chain just before SUBELT.
1400 This way the element that follows SUBELT is the oldest one. */
1401 elt->related_value = subelt_prev->related_value;
1402 subelt_prev->related_value = elt;
1409 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
1410 CLASS2 into CLASS1. This is done when we have reached an insn which makes
1411 the two classes equivalent.
1413 CLASS1 will be the surviving class; CLASS2 should not be used after this
1416 Any invalid entries in CLASS2 will not be copied. */
1419 merge_equiv_classes (class1, class2)
1420 struct table_elt *class1, *class2;
1422 struct table_elt *elt, *next, *new;
1424 /* Ensure we start with the head of the classes. */
1425 class1 = class1->first_same_value;
1426 class2 = class2->first_same_value;
1428 /* If they were already equal, forget it. */
1429 if (class1 == class2)
/* Move every member of CLASS2 into CLASS1, one at a time.  NEXT is
   saved first because removal unlinks ELT from the chain. */
1432 for (elt = class2; elt; elt = next)
1436 enum machine_mode mode = elt->mode;
1438 next = elt->next_same_value;
1440 /* Remove old entry, make a new one in CLASS1's class.
1441 Don't do this for invalid entries as we cannot find their
1442 hash code (it also isn't necessary). */
1443 if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
1445 hash_arg_in_memory = 0;
1446 hash_arg_in_struct = 0;
1447 hash = HASH (exp, mode);
1449 if (GET_CODE (exp) == REG)
1450 delete_reg_equiv (REGNO (exp));
1452 remove_from_table (elt, hash);
/* insert_regs may change reg_qty, invalidating the hash; recompute. */
1454 if (insert_regs (exp, class1, 0))
1455 hash = HASH (exp, mode);
1456 new = insert (exp, class1, hash, mode);
1457 new->in_memory = hash_arg_in_memory;
1458 new->in_struct = hash_arg_in_struct;
1463 /* Remove from the hash table, or mark as invalid,
1464 all expressions whose values could be altered by storing in X.
1465 X is a register, a subreg, or a memory reference with nonvarying address
1466 (because, when a memory reference with a varying address is stored in,
1467 all memory references are removed by invalidate_memory
1468 so specific invalidation is superfluous).
1470 A nonvarying address may be just a register or just
1471 a symbol reference, or it may be either of those plus
1472 a numeric offset. */
1479 register struct table_elt *p;
1481 HOST_WIDE_INT start, end;
1483 /* If X is a register, dependencies on its contents
1484 are recorded through the qty number mechanism.
1485 Just change the qty number of the register,
1486 mark it as invalid for expressions that refer to it,
1487 and remove it itself. */
1489 if (GET_CODE (x) == REG)
1491 register int regno = REGNO (x);
1492 register int hash = HASH (x, GET_MODE (x));
1494 /* Remove REGNO from any quantity list it might be on and indicate
1495 that it's value might have changed. If it is a pseudo, remove its
1496 entry from the hash table.
1498 For a hard register, we do the first two actions above for any
1499 additional hard registers corresponding to X. Then, if any of these
1500 registers are in the table, we must remove any REG entries that
1501 overlap these registers. */
1503 delete_reg_equiv (regno);
1506 if (regno >= FIRST_PSEUDO_REGISTER)
1507 remove_from_table (lookup_for_remove (x, hash, GET_MODE (x)), hash);
/* Hard-register case: invalidate every hard reg X occupies, then scan
   the whole table for overlapping REG entries if any were recorded. */
1510 HOST_WIDE_INT in_table
1511 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1512 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1513 int tregno, tendregno;
1514 register struct table_elt *p, *next;
1516 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1518 for (i = regno + 1; i < endregno; i++)
1520 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1521 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1522 delete_reg_equiv (i);
1527 for (hash = 0; hash < NBUCKETS; hash++)
1528 for (p = table[hash]; p; p = next)
1530 next = p->next_same_hash;
1532 if (GET_CODE (p->exp) != REG
1533 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1536 tregno = REGNO (p->exp);
1538 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
/* Remove the entry if its hard-reg range [tregno,tendregno)
   overlaps X's range [regno,endregno). */
1539 if (tendregno > regno && tregno < endregno)
1540 remove_from_table (p, hash);
1547 if (GET_CODE (x) == SUBREG)
1549 if (GET_CODE (SUBREG_REG (x)) != REG)
1551 invalidate (SUBREG_REG (x));
1555 /* X is not a register; it must be a memory reference with
1556 a nonvarying address. Remove all hash table elements
1557 that refer to overlapping pieces of memory. */
1559 if (GET_CODE (x) != MEM)
1562 set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (GET_MODE (x)),
1563 &base, &start, &end);
1565 for (i = 0; i < NBUCKETS; i++)
1567 register struct table_elt *next;
1568 for (p = table[i]; p; p = next)
1570 next = p->next_same_hash;
1571 if (refers_to_mem_p (p->exp, base, start, end))
1572 remove_from_table (p, i);
1577 /* Remove all expressions that refer to register REGNO,
1578 since they are already invalid, and we are about to
1579 mark that register valid again and don't want the old
1580 expressions to reappear as valid. */
1583 remove_invalid_refs (regno)
1587 register struct table_elt *p, *next;
/* Scan every bucket; bare REG entries are skipped because the qty
   mechanism handles them, but any larger expression mentioning REGNO
   is removed. */
1589 for (i = 0; i < NBUCKETS; i++)
1590 for (p = table[i]; p; p = next)
1592 next = p->next_same_hash;
1593 if (GET_CODE (p->exp) != REG
1594 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1595 remove_from_table (p, i);
1599 /* Recompute the hash codes of any valid entries in the hash table that
1600 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1602 This is called when we make a jump equivalence. */
1605 rehash_using_reg (x)
1609 struct table_elt *p, *next;
1612 if (GET_CODE (x) == SUBREG)
1615 /* If X is not a register or if the register is known not to be in any
1616 valid entries in the table, we have no work to do. */
1618 if (GET_CODE (x) != REG
1619 || reg_in_table[REGNO (x)] < 0
1620 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1623 /* Scan all hash chains looking for valid entries that mention X.
1624 If we find one and it is in the wrong hash chain, move it. We can skip
1625 objects that are registers, since they are handled specially. */
1627 for (i = 0; i < NBUCKETS; i++)
1628 for (p = table[i]; p; p = next)
1630 next = p->next_same_hash;
1631 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1632 && exp_equiv_p (p->exp, p->exp, 1, 0)
1633 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
/* Unlink P from bucket I ... */
1635 if (p->next_same_hash)
1636 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1638 if (p->prev_same_hash)
1639 p->prev_same_hash->next_same_hash = p->next_same_hash;
1641 table[i] = p->next_same_hash;
/* ... and push it onto the head of its correct bucket HASH. */
1643 p->next_same_hash = table[hash];
1644 p->prev_same_hash = 0;
1646 table[hash]->prev_same_hash = p;
1652 /* Remove from the hash table all expressions that reference memory,
1653 or some of them as specified by *WRITES. */
1656 invalidate_memory (writes)
1657 struct write_data *writes;
1660 register struct table_elt *p, *next;
1661 int all = writes->all;
1662 int nonscalar = writes->nonscalar;
/* Remove every in-memory entry matching the write description:
   all of memory, any structure memory, or varying-address refs.
   NOTE(review): the condition's first disjunct (testing p->in_memory
   and ALL) is on a line elided from this extract. */
1664 for (i = 0; i < NBUCKETS; i++)
1665 for (p = table[i]; p; p = next)
1667 next = p->next_same_hash;
1670 || (nonscalar && p->in_struct)
1671 || cse_rtx_addr_varies_p (p->exp)))
1672 remove_from_table (p, i);
1676 /* Remove from the hash table any expression that is a call-clobbered
1677 register. Also update their TICK values. */
1680 invalidate_for_call ()
1682 int regno, endregno;
1685 struct table_elt *p, *next;
1688 /* Go through all the hard registers. For each that is clobbered in
1689 a CALL_INSN, remove the register from quantity chains and update
1690 reg_tick if defined. Also see if any of these registers is currently
1693 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1694 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1696 delete_reg_equiv (regno);
1697 if (reg_tick[regno] >= 0)
/* Remember whether any clobbered register appears in the table,
   so the expensive full-table scan below can be skipped. */
1700 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1703 /* In the case where we have no call-clobbered hard registers in the
1704 table, we are done. Otherwise, scan the table and remove any
1705 entry that overlaps a call-clobbered register. */
1708 for (hash = 0; hash < NBUCKETS; hash++)
1709 for (p = table[hash]; p; p = next)
1711 next = p->next_same_hash;
1713 if (GET_CODE (p->exp) != REG
1714 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1717 regno = REGNO (p->exp);
1718 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1720 for (i = regno; i < endregno; i++)
1721 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1723 remove_from_table (p, hash);
1729 /* Given an expression X of type CONST,
1730 and ELT which is its table entry (or 0 if it
1731 is not in the hash table),
1732 return an alternate expression for X as a register plus integer.
1733 If none can be found, return 0. */
1736 use_related_value (x, elt)
1738 struct table_elt *elt;
1740 register struct table_elt *relt = 0;
1741 register struct table_elt *p, *q;
1742 HOST_WIDE_INT offset;
1744 /* First, is there anything related known?
1745 If we have a table element, we can tell from that.
1746 Otherwise, must look it up. */
1748 if (elt != 0 && elt->related_value != 0)
1750 else if (elt == 0 && GET_CODE (x) == CONST)
1752 rtx subexp = get_related_value (x);
1754 relt = lookup (subexp,
1755 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1762 /* Search all related table entries for one that has an
1763 equivalent register. */
1768 /* This loop is strange in that it is executed in two different cases.
1769 The first is when X is already in the table. Then it is searching
1770 the RELATED_VALUE list of X's class (RELT). The second case is when
1771 X is not in the table. Then RELT points to a class for the related
1774 Ensure that, whatever case we are in, that we ignore classes that have
1775 the same value as X. */
1777 if (rtx_equal_p (x, p->exp))
/* Look through P's class for a register member. */
1780 for (q = p->first_same_value; q; q = q->next_same_value)
1781 if (GET_CODE (q->exp) == REG)
1787 p = p->related_value;
1789 /* We went all the way around, so there is nothing to be found.
1790 Alternatively, perhaps RELT was in the table for some other reason
1791 and it has no related values recorded. */
1792 if (p == relt || p == 0)
/* Rewrite X as Q's register plus the difference of integer terms. */
1799 offset = (get_integer_term (x) - get_integer_term (p->exp));
1800 /* Note: OFFSET may be 0 if P->xexp and X are related by commutativity. */
1801 return plus_constant (q->exp, offset);
1804 /* Hash an rtx. We are careful to make sure the value is never negative.
1805 Equivalent registers hash identically.
1806 MODE is used in hashing for CONST_INTs only;
1807 otherwise the mode of X is used.
1809 Store 1 in do_not_record if any subexpression is volatile.
1811 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1812 which does not have the RTX_UNCHANGING_P bit set.
1813 In this case, also store 1 in hash_arg_in_struct
1814 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1816 Note that cse_insn knows that the hash code of a MEM expression
1817 is just (int) MEM plus the hash code of the address. */
1820 canon_hash (x, mode)
1822 enum machine_mode mode;
1825 register int hash = 0;
1826 register enum rtx_code code;
1829 /* repeat is used to turn tail-recursion into iteration. */
1834 code = GET_CODE (x);
/* REG: hash via reg_qty so equivalent registers hash identically. */
1839 register int regno = REGNO (x);
1841 /* On some machines, we can't record any non-fixed hard register,
1842 because extending its life will cause reload problems. We
1843 consider ap, fp, and sp to be fixed for this purpose.
1844 On all machines, we can't record any global registers. */
1846 if (regno < FIRST_PSEUDO_REGISTER
1847 && (global_regs[regno]
1848 #ifdef SMALL_REGISTER_CLASSES
1849 || (! fixed_regs[regno]
1850 && regno != FRAME_POINTER_REGNUM
1851 && regno != HARD_FRAME_POINTER_REGNUM
1852 && regno != ARG_POINTER_REGNUM
1853 && regno != STACK_POINTER_REGNUM)
1860 return hash + ((int) REG << 7) + reg_qty[regno];
/* CONST_INT: fold the value into HASHBITS bits, mixing in MODE. */
1864 hash += ((int) mode + ((int) CONST_INT << 7)
1865 + INTVAL (x) + (INTVAL (x) >> HASHBITS));
1866 return ((1 << HASHBITS) - 1) & hash;
1869 /* This is like the general case, except that it only counts
1870 the integers representing the constant. */
1871 hash += (int) code + (int) GET_MODE (x);
1874 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1876 int tem = XINT (x, i);
1877 hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
1882 /* Assume there is only one rtx object for any given label. */
1884 /* Use `and' to ensure a positive number. */
1885 return (hash + ((HOST_WIDE_INT) LABEL_REF << 7)
1886 + ((HOST_WIDE_INT) XEXP (x, 0) & ((1 << HASHBITS) - 1)));
1889 return (hash + ((HOST_WIDE_INT) SYMBOL_REF << 7)
1890 + ((HOST_WIDE_INT) XEXP (x, 0) & ((1 << HASHBITS) - 1)));
/* MEM: a volatile MEM (and the elided lines set do_not_record) is
   never recorded; an ordinary MEM sets the in-memory flags. */
1893 if (MEM_VOLATILE_P (x))
1898 if (! RTX_UNCHANGING_P (x))
1900 hash_arg_in_memory = 1;
1901 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1903 /* Now that we have already found this special case,
1904 might as well speed it up as much as possible. */
1916 case UNSPEC_VOLATILE:
1921 if (MEM_VOLATILE_P (x))
/* General case: combine code, mode, and all operands. */
1928 i = GET_RTX_LENGTH (code) - 1;
1929 hash += (int) code + (int) GET_MODE (x);
1930 fmt = GET_RTX_FORMAT (code);
1935 rtx tem = XEXP (x, i);
1938 /* If the operand is a REG that is equivalent to a constant, hash
1939 as if we were hashing the constant, since we will be comparing
1941 if (tem != 0 && GET_CODE (tem) == REG
1942 && REGNO_QTY_VALID_P (REGNO (tem))
1943 && qty_mode[reg_qty[REGNO (tem)]] == GET_MODE (tem)
1944 && (tem1 = qty_const[reg_qty[REGNO (tem)]]) != 0
1945 && CONSTANT_P (tem1))
1948 /* If we are about to do the last recursive call
1949 needed at this level, change it into iteration.
1950 This function is called enough to be worth it. */
1956 hash += canon_hash (tem, 0);
1958 else if (fmt[i] == 'E')
1959 for (j = 0; j < XVECLEN (x, i); j++)
1960 hash += canon_hash (XVECEXP (x, i, j), 0);
1961 else if (fmt[i] == 's')
1963 register char *p = XSTR (x, i);
1967 register int tem = *p++;
1968 hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
1971 else if (fmt[i] == 'i')
1973 register int tem = XINT (x, i);
1974 hash += ((1 << HASHBITS) - 1) & (tem + (tem >> HASHBITS));
1982 /* Like canon_hash but with no side effects. */
1987 enum machine_mode mode;
/* Save and restore the three globals canon_hash may set, so callers
   that only want the hash value see no state change.
   NOTE(review): the function header line for safe_hash is elided
   in this extract. */
1989 int save_do_not_record = do_not_record;
1990 int save_hash_arg_in_memory = hash_arg_in_memory;
1991 int save_hash_arg_in_struct = hash_arg_in_struct;
1992 int hash = canon_hash (x, mode);
1993 hash_arg_in_memory = save_hash_arg_in_memory;
1994 hash_arg_in_struct = save_hash_arg_in_struct;
1995 do_not_record = save_do_not_record;
1999 /* Return 1 iff X and Y would canonicalize into the same thing,
2000 without actually constructing the canonicalization of either one.
2001 If VALIDATE is nonzero,
2002 we assume X is an expression being processed from the rtl
2003 and Y was found in the hash table. We check register refs
2004 in Y for being marked as valid.
2006 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2007 that is known to be in the register. Ordinarily, we don't allow them
2008 to match, because letting them match would cause unpredictable results
2009 in all the places that search a hash table chain for an equivalent
2010 for a given value. A possible equivalent that has different structure
2011 has its hash code computed from different data. Whether the hash code
2012 is the same as that of the the given value is pure luck. */
2015 exp_equiv_p (x, y, validate, equal_values)
2021 register enum rtx_code code;
2024 /* Note: it is incorrect to assume an expression is equivalent to itself
2025 if VALIDATE is nonzero. */
2026 if (x == y && !validate)
2028 if (x == 0 || y == 0)
2031 code = GET_CODE (x);
2032 if (code != GET_CODE (y))
2037 /* If X is a constant and Y is a register or vice versa, they may be
2038 equivalent. We only have to validate if Y is a register. */
2039 if (CONSTANT_P (x) && GET_CODE (y) == REG
2040 && REGNO_QTY_VALID_P (REGNO (y))
2041 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2042 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2043 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2046 if (CONSTANT_P (y) && code == REG
2047 && REGNO_QTY_VALID_P (REGNO (x))
2048 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2049 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]])
2055 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2056 if (GET_MODE (x) != GET_MODE (y))
2066 return INTVAL (x) == INTVAL (y);
/* LABEL_REF / SYMBOL_REF: compare the single operand by identity. */
2070 return XEXP (x, 0) == XEXP (y, 0);
/* REG case: equivalent iff same quantity, plus (when validating)
   up-to-date reg_in_table entries for every hard reg covered. */
2074 int regno = REGNO (y);
2076 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2077 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2080 /* If the quantities are not the same, the expressions are not
2081 equivalent. If there are and we are not to validate, they
2082 are equivalent. Otherwise, ensure all regs are up-to-date. */
2084 if (reg_qty[REGNO (x)] != reg_qty[regno])
2090 for (i = regno; i < endregno; i++)
2091 if (reg_in_table[i] != reg_tick[i])
2097 /* For commutative operations, check both orders. */
2105 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2106 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2107 validate, equal_values))
2108 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2109 validate, equal_values)
2110 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2111 validate, equal_values)));
2114 /* Compare the elements. If any pair of corresponding elements
2115 fail to match, return 0 for the whole things. */
2117 fmt = GET_RTX_FORMAT (code);
2118 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2123 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2128 if (XVECLEN (x, i) != XVECLEN (y, i))
2130 for (j = 0; j < XVECLEN (x, i); j++)
2131 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2132 validate, equal_values))
2137 if (strcmp (XSTR (x, i), XSTR (y, i)))
2142 if (XINT (x, i) != XINT (y, i))
2147 if (XWINT (x, i) != XWINT (y, i))
2162 /* Return 1 iff any subexpression of X matches Y.
2163 Here we do not require that X or Y be valid (for registers referred to)
2164 for being in the hash table. */
2171 register enum rtx_code code;
/* NOTE(review): the function header line for refers_to_p is elided
   in this extract. */
2177 if (x == 0 || y == 0)
2180 code = GET_CODE (x);
2181 /* If X as a whole has the same code as Y, they may match.
2183 if (code == GET_CODE (y))
2185 if (exp_equiv_p (x, y, 0, 1))
2189 /* X does not match, so try its subexpressions. */
2191 fmt = GET_RTX_FORMAT (code);
2192 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2201 if (refers_to_p (XEXP (x, i), y))
2204 else if (fmt[i] == 'E')
2207 for (j = 0; j < XVECLEN (x, i); j++)
2208 if (refers_to_p (XVECEXP (x, i, j), y))
2215 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2216 set PBASE, PSTART, and PEND which correspond to the base of the address,
2217 the starting offset, and ending offset respectively.
2219 ADDR is known to be a nonvarying address.
2221 cse_address_varies_p returns zero for nonvarying addresses. */
2224 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2228 HOST_WIDE_INT *pstart, *pend;
2237 /* Registers with nonvarying addresses usually have constant equivalents;
2238 but the frame pointer register is also possible. */
2239 if (GET_CODE (base) == REG
2241 && REGNO_QTY_VALID_P (REGNO (base))
2242 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2243 && qty_const[reg_qty[REGNO (base)]] != 0)
2244 base = qty_const[reg_qty[REGNO (base)]];
2245 else if (GET_CODE (base) == PLUS
2246 && GET_CODE (XEXP (base, 1)) == CONST_INT
2247 && GET_CODE (XEXP (base, 0)) == REG
2249 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2250 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2251 == GET_MODE (XEXP (base, 0)))
2252 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
/* (PLUS reg const_int): fold the offset into START and replace the
   register with its known constant value. */
2254 start = INTVAL (XEXP (base, 1));
2255 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2258 /* By definition, operand1 of a LO_SUM is the associated constant
2259 address. Use the associated constant address as the base instead. */
2260 if (GET_CODE (base) == LO_SUM)
2261 base = XEXP (base, 1);
2263 /* Strip off CONST. */
2264 if (GET_CODE (base) == CONST)
2265 base = XEXP (base, 0);
2267 if (GET_CODE (base) == PLUS
2268 && GET_CODE (XEXP (base, 1)) == CONST_INT)
2270 start += INTVAL (XEXP (base, 1));
2271 base = XEXP (base, 0);
2276 /* Set the return values. */
/* NOTE(review): the stores through PBASE/PSTART/PEND are on lines
   elided from this extract. */
2282 /* Return 1 iff any subexpression of X refers to memory
2283 at an address of BASE plus some offset
2284 such that any of the bytes' offsets fall between START (inclusive)
2285 and END (exclusive).
2287 The value is undefined if X is a varying address (as determined by
2288 cse_rtx_addr_varies_p). This function is not used in such cases.
2290 When used in the cse pass, `qty_const' is nonzero, and it is used
2291 to treat an address that is a register with a known constant value
2292 as if it were that constant value.
2293 In the loop pass, `qty_const' is zero, so this is not done. */
/* NOTE(review): lines are missing from this extraction (parameter decls,
   braces, the MEM case label, and some return statements); comments
   describe only the visible logic.  */
2296 refers_to_mem_p (x, base, start, end)
2298 HOST_WIDE_INT start, end;
2300 register HOST_WIDE_INT i;
2301 register enum rtx_code code;
/* If BASE itself is a constant integer, absorb it into the offsets so
   the interval [START, END) is absolute.  */
2304 if (GET_CODE (base) == CONST_INT)
2306 start += INTVAL (base);
2307 end += INTVAL (base);
2315 code = GET_CODE (x);
/* Presumably this is the CODE == MEM case (the case label is among the
   missing lines): decompose X's address and test for byte-range overlap
   against [START, END).  */
2318 register rtx addr = XEXP (x, 0); /* Get the address. */
2320 HOST_WIDE_INT mystart, myend;
2322 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2323 &mybase, &mystart, &myend);
2326 /* refers_to_mem_p is never called with varying addresses.
2327 If the base addresses are not equal, there is no chance
2328 of the memory addresses conflicting. */
2329 if (! rtx_equal_p (mybase, base))
/* Overlap iff the half-open intervals intersect.  */
2332 return myend > start && mystart < end;
2335 /* X does not match, so try its subexpressions. */
2337 fmt = GET_RTX_FORMAT (code);
2338 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
/* Recurse into 'e' operands and each element of 'E' vectors.  */
2347 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2350 else if (fmt[i] == 'E')
2353 for (j = 0; j < XVECLEN (x, i); j++)
2354 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2361 /* Nonzero if X refers to memory at a varying address;
2362 except that a register which has at the moment a known constant value
2363 isn't considered variable. */
/* NOTE(review): the early `return 0;` bodies of the two if-statements
   below are among the lines missing from this extraction; the visible
   conditions test whether the MEM address is a register (or reg+const)
   with a known constant equivalent, in which case the address is treated
   as nonvarying.  */
2366 cse_rtx_addr_varies_p (x)
2369 /* We need not check for X and the equivalence class being of the same
2370 mode because if X is equivalent to a constant in some mode, it
2371 doesn't vary in any mode. */
2373 if (GET_CODE (x) == MEM
2374 && GET_CODE (XEXP (x, 0)) == REG
2375 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2376 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2377 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
/* Same test for a (mem (plus reg const_int)) address.  */
2380 if (GET_CODE (x) == MEM
2381 && GET_CODE (XEXP (x, 0)) == PLUS
2382 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2383 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2384 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2385 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2386 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2387 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
/* Otherwise fall back to the generic varying-address test.  */
2390 return rtx_addr_varies_p (x);
2393 /* Canonicalize an expression:
2394 replace each register reference inside it
2395 with the "oldest" equivalent register.
2397 If INSN is non-zero and we are replacing a pseudo with a hard register
2398 or vice versa, validate_change is used to ensure that INSN remains valid
2399 after we make our substitution. The calls are made with IN_GROUP non-zero
2400 so apply_change_group must be called upon the outermost return from this
2401 function (unless INSN is zero). The result of apply_change_group can
2402 generally be discarded since the changes we are making are optional. */
/* NOTE(review): the `canon_reg (x, insn)` definition line and several
   interior lines (null check, REG case label, braces) are missing from
   this extraction; comments describe only the visible fragment.  */
2410 register enum rtx_code code;
2416 code = GET_CODE (x);
2434 /* Never replace a hard reg, because hard regs can appear
2435 in more than one machine mode, and we must preserve the mode
2436 of each occurrence. Also, some hard regs appear in
2437 MEMs that are shared and mustn't be altered. Don't try to
2438 replace any reg that maps to a reg of class NO_REGS. */
2439 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2440 || ! REGNO_QTY_VALID_P (REGNO (x)))
/* Replace X with the oldest (first) register of its quantity class:
   reuse the existing rtx for a pseudo, keep X for NO_REGS hard regs,
   otherwise make a fresh REG in X's recorded mode.  */
2443 first = qty_first_reg[reg_qty[REGNO (x)]];
2444 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2445 : REGNO_REG_CLASS (first) == NO_REGS ? x
2446 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2450 fmt = GET_RTX_FORMAT (code);
2451 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
/* Recursively canonicalize each 'e' operand.  */
2457 rtx new = canon_reg (XEXP (x, i), insn);
2459 /* If replacing pseudo with hard reg or vice versa, ensure the
2460 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2461 if (insn != 0 && new != 0
2462 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2463 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2464 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2465 || insn_n_dups[recog_memoized (insn)] > 0))
/* IN_GROUP = 1: caller is responsible for apply_change_group.  */
2466 validate_change (insn, &XEXP (x, i), new, 1);
2470 else if (fmt[i] == 'E')
2471 for (j = 0; j < XVECLEN (x, i); j++)
2472 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2478 /* LOC is a location with INSN that is an operand address (the contents of
2479 a MEM). Find the best equivalent address to use that is valid for this
2482 On most CISC machines, complicated address modes are costly, and rtx_cost
2483 is a good approximation for that cost. However, most RISC machines have
2484 only a few (usually only one) memory reference formats. If an address is
2485 valid at all, it is often just as cheap as any other address. Hence, for
2486 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2487 costs of various addresses. For two addresses of equal cost, choose the one
2488 with the highest `rtx_cost' value as that has the potential of eliminating
2489 the most insns. For equal costs, we choose the first in the equivalence
2490 class. Note that we ignore the fact that pseudo registers are cheaper
2491 than hard registers here because we would also prefer the pseudo registers.
/* NOTE(review): many interior lines (decls of addr/regno/hash_code,
   braces, early returns, the flag-setting statements in the search
   loops) are missing from this extraction; comments describe only the
   visible logic.  */
2495 find_best_addr (insn, loc)
2499 struct table_elt *elt, *p;
2502 int found_better = 1;
/* Save the hash-state globals that HASH clobbers so they can be
   restored after probing.  */
2503 int save_do_not_record = do_not_record;
2504 int save_hash_arg_in_memory = hash_arg_in_memory;
2505 int save_hash_arg_in_struct = hash_arg_in_struct;
2510 /* Do not try to replace constant addresses or addresses of local and
2511 argument slots. These MEM expressions are made only once and inserted
2512 in many instructions, as well as being used to control symbol table
2513 output. It is not safe to clobber them.
2515 There are some uncommon cases where the address is already in a register
2516 for some reason, but we cannot take advantage of that because we have
2517 no easy way to unshare the MEM. In addition, looking up all stack
2518 addresses is costly. */
2519 if ((GET_CODE (addr) == PLUS
2520 && GET_CODE (XEXP (addr, 0)) == REG
2521 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2522 && (regno = REGNO (XEXP (addr, 0)),
2523 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2524 || regno == ARG_POINTER_REGNUM))
2525 || (GET_CODE (addr) == REG
2526 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2527 || regno == HARD_FRAME_POINTER_REGNUM
2528 || regno == ARG_POINTER_REGNUM))
2529 || CONSTANT_ADDRESS_P (addr))
2532 /* If this address is not simply a register, try to fold it. This will
2533 sometimes simplify the expression. Many simplifications
2534 will not be valid, but some, usually applying the associative rule, will
2535 be valid and produce better code. */
2536 if (GET_CODE (addr) != REG
2537 && validate_change (insn, loc, fold_rtx (addr, insn), 0))
2540 /* If this address is not in the hash table, we can't look for equivalences
2541 of the whole address. Also, ignore if volatile. */
2544 hash_code = HASH (addr, Pmode);
2545 addr_volatile = do_not_record;
/* Restore the saved hash-state globals.  */
2546 do_not_record = save_do_not_record;
2547 hash_arg_in_memory = save_hash_arg_in_memory;
2548 hash_arg_in_struct = save_hash_arg_in_struct;
2553 elt = lookup (addr, hash_code, Pmode);
2555 #ifndef ADDRESS_COST
/* CISC path: no ADDRESS_COST macro, so compare plain rtx costs.  */
2558 our_cost = elt->cost;
2560 /* Find the lowest cost below ours that works. */
2561 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2562 if (elt->cost < our_cost
2563 && (GET_CODE (elt->exp) == REG
2564 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2565 && validate_change (insn, loc,
2566 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2573 /* We need to find the best (under the criteria documented above) entry
2574 in the class that is valid. We use the `flag' field to indicate
2575 choices that were invalid and iterate until we can't find a better
2576 one that hasn't already been tried. */
2578 for (p = elt->first_same_value; p; p = p->next_same_value)
2581 while (found_better)
/* Each pass: scan the equivalence class for an untried entry that
   beats the current best by ADDRESS_COST, ties broken toward the
   higher rtx_cost (more insns potentially eliminated).  */
2583 int best_addr_cost = ADDRESS_COST (*loc);
2584 int best_rtx_cost = (elt->cost + 1) >> 1;
2585 struct table_elt *best_elt = elt;
2588 for (p = elt->first_same_value; p; p = p->next_same_value)
2590 && (GET_CODE (p->exp) == REG
2591 || exp_equiv_p (p->exp, p->exp, 1, 0))
2592 && (ADDRESS_COST (p->exp) < best_addr_cost
2593 || (ADDRESS_COST (p->exp) == best_addr_cost
2594 && (p->cost + 1) >> 1 > best_rtx_cost)))
2597 best_addr_cost = ADDRESS_COST (p->exp);
2598 best_rtx_cost = (p->cost + 1) >> 1;
2604 if (validate_change (insn, loc,
2605 canon_reg (copy_rtx (best_elt->exp),
2614 /* If the address is a binary operation with the first operand a register
2615 and the second a constant, do the same as above, but looking for
2616 equivalences of the register. Then try to simplify before checking for
2617 the best address to use. This catches a few cases: First is when we
2618 have REG+const and the register is another REG+const. We can often merge
2619 the constants and eliminate one insn and one register. It may also be
2620 that a machine has a cheap REG+REG+const. Finally, this improves the
2621 code on the Alpha for unaligned byte stores. */
2623 if (flag_expensive_optimizations
2624 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2625 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2626 && GET_CODE (XEXP (*loc, 0)) == REG
2627 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2629 rtx c = XEXP (*loc, 1);
2632 hash_code = HASH (XEXP (*loc, 0), Pmode);
2633 do_not_record = save_do_not_record;
2634 hash_arg_in_memory = save_hash_arg_in_memory;
2635 hash_arg_in_struct = save_hash_arg_in_struct;
2637 elt = lookup (XEXP (*loc, 0), hash_code, Pmode);
2641 /* We need to find the best (under the criteria documented above) entry
2642 in the class that is valid. We use the `flag' field to indicate
2643 choices that were invalid and iterate until we can't find a better
2644 one that hasn't already been tried. */
2646 for (p = elt->first_same_value; p; p = p->next_same_value)
2649 while (found_better)
2651 int best_addr_cost = ADDRESS_COST (*loc);
2652 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2653 struct table_elt *best_elt = elt;
2654 rtx best_rtx = *loc;
2657 for (p = elt->first_same_value; p; p = p->next_same_value)
2659 && (GET_CODE (p->exp) == REG
2660 || exp_equiv_p (p->exp, p->exp, 1, 0)))
/* Re-form the address with the candidate register equivalent and
   compare the resulting address's cost.  */
2662 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2664 if ((ADDRESS_COST (new) < best_addr_cost
2665 || (ADDRESS_COST (new) == best_addr_cost
2666 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2669 best_addr_cost = ADDRESS_COST (new);
2670 best_rtx_cost = (COST (new) + 1) >> 1;
2678 if (validate_change (insn, loc,
2679 canon_reg (copy_rtx (best_rtx),
2690 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2691 operation (EQ, NE, GT, etc.), follow it back through the hash table and
2692 what values are being compared.
2694 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2695 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2696 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2697 compared to produce cc0.
2699 The return value is the comparison operator and is either the code of
2700 A or the code corresponding to the inverse of the comparison. */
/* NOTE(review): parameter/local decls, braces, and several condition
   lines are missing from this extraction; comments describe only the
   visible logic.  */
2702 static enum rtx_code
2703 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2706 enum machine_mode *pmode1, *pmode2;
2710 arg1 = *parg1, arg2 = *parg2;
2712 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2714 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2716 /* Set non-zero when we find something of interest. */
2718 int reverse_code = 0;
2719 struct table_elt *p = 0;
2721 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2722 On machines with CC0, this is the only case that can occur, since
2723 fold_rtx will return the COMPARE or item being compared with zero
2726 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2729 /* If ARG1 is a comparison operator and CODE is testing for
2730 STORE_FLAG_VALUE, get the inner arguments. */
2732 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2735 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2736 && code == LT && STORE_FLAG_VALUE == -1)
2737 #ifdef FLOAT_STORE_FLAG_VALUE
2738 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2739 && FLOAT_STORE_FLAG_VALUE < 0)
/* The mirrored conditions below handle the EQ/GE direction, where the
   comparison must be reversed.  */
2744 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2745 && code == GE && STORE_FLAG_VALUE == -1)
2746 #ifdef FLOAT_STORE_FLAG_VALUE
2747 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2748 && FLOAT_STORE_FLAG_VALUE < 0)
2751 x = arg1, reverse_code = 1;
2754 /* ??? We could also check for
2756 (ne (and (eq (...) (const_int 1))) (const_int 0))
2758 and related forms, but let's wait until we see them occurring. */
2761 /* Look up ARG1 in the hash table and see if it has an equivalence
2762 that lets us see what is being compared. */
2763 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2765 if (p) p = p->first_same_value;
2767 for (; p; p = p->next_same_value)
2769 enum machine_mode inner_mode = GET_MODE (p->exp);
2771 /* If the entry isn't valid, skip it. */
2772 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2775 if (GET_CODE (p->exp) == COMPARE
2776 /* Another possibility is that this machine has a compare insn
2777 that includes the comparison code. In that case, ARG1 would
2778 be equivalent to a comparison operation that would set ARG1 to
2779 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2780 ORIG_CODE is the actual comparison being done; if it is an EQ,
2781 we must reverse ORIG_CODE. On machine with a negative value
2782 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2785 && GET_MODE_CLASS (inner_mode) == MODE_INT
2786 && (GET_MODE_BITSIZE (inner_mode)
2787 <= HOST_BITS_PER_WIDE_INT)
/* Checks that STORE_FLAG_VALUE has the sign bit set in INNER_MODE.  */
2788 && (STORE_FLAG_VALUE
2789 & ((HOST_WIDE_INT) 1
2790 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2791 #ifdef FLOAT_STORE_FLAG_VALUE
2793 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2794 && FLOAT_STORE_FLAG_VALUE < 0)
2797 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2802 else if ((code == EQ
2804 && GET_MODE_CLASS (inner_mode) == MODE_INT
2805 && (GET_MODE_BITSIZE (inner_mode)
2806 <= HOST_BITS_PER_WIDE_INT)
2807 && (STORE_FLAG_VALUE
2808 & ((HOST_WIDE_INT) 1
2809 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2810 #ifdef FLOAT_STORE_FLAG_VALUE
2812 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2813 && FLOAT_STORE_FLAG_VALUE < 0)
2816 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2823 /* If this is fp + constant, the equivalent is a better operand since
2824 it may let us predict the value of the comparison. */
2825 else if (NONZERO_BASE_PLUS_P (p->exp))
2832 /* If we didn't find a useful equivalence for ARG1, we are done.
2833 Otherwise, set up for the next iteration. */
2837 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
2838 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
2839 code = GET_CODE (x);
/* Presumably guarded by `if (reverse_code)` (line missing here).  */
2842 code = reverse_condition (code);
2845 /* Return our results. Return the modes from before fold_rtx
2846 because fold_rtx might produce const_int, and then it's too late. */
2847 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
2848 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
2853 /* Try to simplify a unary operation CODE whose output mode is to be
2854 MODE with input operand OP whose mode was originally OP_MODE.
2855 Return zero if no simplification can be made. */
/* NOTE(review): this extraction is missing interleaved lines (switch
   labels, braces, some decls); comments describe only the visible
   logic.  The one code change is at line 3025, where the plain-int
   shift `(1 << width)` is undefined behavior when width >= the bit
   width of int (the guard only requires width < HOST_BITS_PER_WIDE_INT,
   which can be 64 while int is 32 bits).  It is rewritten to match the
   identical sign-clearing idiom at line 3203 below.  */
2858 simplify_unary_operation (code, mode, op, op_mode)
2860 enum machine_mode mode;
2862 enum machine_mode op_mode;
2864 register int width = GET_MODE_BITSIZE (mode);
2866 /* The order of these tests is critical so that, for example, we don't
2867 check the wrong mode (input vs. output) for a conversion operation,
2868 such as FIX. At some point, this should be simplified. */
2870 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2871 if (code == FLOAT && GET_CODE (op) == CONST_INT)
2875 #ifdef REAL_ARITHMETIC
2876 REAL_VALUE_FROM_INT (d, INTVAL (op), INTVAL (op) < 0 ? ~0 : 0);
2878 d = (double) INTVAL (op);
2880 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2882 else if (code == UNSIGNED_FLOAT && GET_CODE (op) == CONST_INT)
2886 #ifdef REAL_ARITHMETIC
2887 REAL_VALUE_FROM_INT (d, INTVAL (op), 0);
2889 d = (double) (unsigned int) INTVAL (op);
2891 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2894 else if (code == FLOAT && GET_CODE (op) == CONST_DOUBLE
2895 && GET_MODE (op) == VOIDmode)
2899 #ifdef REAL_ARITHMETIC
2900 REAL_VALUE_FROM_INT (d, CONST_DOUBLE_LOW (op), CONST_DOUBLE_HIGH (op));
/* Non-REAL_ARITHMETIC fallback: build the double from the two
   HOST_WIDE_INT halves, complementing first for negative values.  */
2902 if (CONST_DOUBLE_HIGH (op) < 0)
2904 d = (double) (~ CONST_DOUBLE_HIGH (op));
2905 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2906 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2907 d += (double) (unsigned HOST_WIDE_INT) (~ CONST_DOUBLE_LOW (op));
2912 d = (double) CONST_DOUBLE_HIGH (op);
2913 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2914 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2915 d += (double) (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op);
2917 #endif /* REAL_ARITHMETIC */
2918 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2920 else if (code == UNSIGNED_FLOAT && GET_CODE (op) == CONST_DOUBLE
2921 && GET_MODE (op) == VOIDmode)
2925 #ifdef REAL_ARITHMETIC
2926 REAL_VALUE_FROM_UNSIGNED_INT (d, CONST_DOUBLE_LOW (op),
2927 CONST_DOUBLE_HIGH (op));
2929 d = (double) CONST_DOUBLE_HIGH (op);
2930 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2931 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2932 d += (double) (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (op);
2933 #endif /* REAL_ARITHMETIC */
2934 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word integer constant folding (the switch on CODE has its
   case labels among the missing lines).  */
2938 if (GET_CODE (op) == CONST_INT
2939 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
2941 register HOST_WIDE_INT arg0 = INTVAL (op);
2942 register HOST_WIDE_INT val;
2955 val = (arg0 >= 0 ? arg0 : - arg0);
2959 /* Don't use ffs here. Instead, get low order bit and then its
2960 number. If arg0 is zero, this will return 0, as desired. */
2961 arg0 &= GET_MODE_MASK (mode);
2962 val = exact_log2 (arg0 & (- arg0)) + 1;
2970 if (op_mode == VOIDmode)
2972 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
2974 /* If we were really extending the mode,
2975 we would have to distinguish between zero-extension
2976 and sign-extension. */
2977 if (width != GET_MODE_BITSIZE (op_mode))
2981 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
2982 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
2988 if (op_mode == VOIDmode)
2990 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
2992 /* If we were really extending the mode,
2993 we would have to distinguish between zero-extension
2994 and sign-extension. */
2995 if (width != GET_MODE_BITSIZE (op_mode))
2999 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3002 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3004 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3005 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3018 /* Clear the bits that don't belong in our mode,
3019 unless they and our sign bit are all one.
3020 So we get either a reasonable negative value or a reasonable
3021 unsigned value for this mode. */
3022 if (width < HOST_BITS_PER_WIDE_INT
3023 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3024 != ((HOST_WIDE_INT) (-1) << (width - 1))))
/* Was `(1 << width) - 1': shifting plain int is UB for width >= bits
   of int; use HOST_WIDE_INT as at line 3203 below.  */
3025 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3027 return GEN_INT (val);
3030 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3031 for a DImode operation on a CONST_INT. */
3032 else if (GET_MODE (op) == VOIDmode && width == HOST_BITS_PER_INT * 2
3033 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3035 HOST_WIDE_INT l1, h1, lv, hv;
3037 if (GET_CODE (op) == CONST_DOUBLE)
3038 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3040 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3050 neg_double (l1, h1, &lv, &hv);
3055 neg_double (l1, h1, &lv, &hv);
/* FFS over two words: use the high word only when the low word is
   zero (case structure among the missing lines).  */
3063 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3065 lv = exact_log2 (l1 & (-l1)) + 1;
3069 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3070 return GEN_INT (l1 & GET_MODE_MASK (mode));
3076 if (op_mode == VOIDmode
3077 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3081 lv = l1 & GET_MODE_MASK (op_mode);
3085 if (op_mode == VOIDmode
3086 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3090 lv = l1 & GET_MODE_MASK (op_mode);
3091 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3092 && (lv & ((HOST_WIDE_INT) 1
3093 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3094 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3096 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3107 return immed_double_const (lv, hv, mode);
3110 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3111 else if (GET_CODE (op) == CONST_DOUBLE
3112 && GET_MODE_CLASS (mode) == MODE_FLOAT)
/* Guard FP folding with a float trap handler; on a trap, give up
   rather than fold (and warn).  */
3118 if (setjmp (handler))
3119 /* There used to be a warning here, but that is inadvisable.
3120 People may want to cause traps, and the natural way
3121 to do it should not get a warning. */
3124 set_float_handler (handler);
3126 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3131 d = REAL_VALUE_NEGATE (d);
3135 if (REAL_VALUE_NEGATIVE (d))
3136 d = REAL_VALUE_NEGATE (d);
3139 case FLOAT_TRUNCATE:
3140 d = real_value_truncate (mode, d);
3144 /* All this does is change the mode. */
3148 d = REAL_VALUE_RNDZINT (d);
3152 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3162 x = immed_real_const_1 (d, mode);
3163 set_float_handler (NULL_PTR);
3166 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE_CLASS (mode) == MODE_INT
3167 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3173 if (setjmp (handler))
3176 set_float_handler (handler);
3178 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3183 val = REAL_VALUE_FIX (d);
3187 val = REAL_VALUE_UNSIGNED_FIX (d);
3194 set_float_handler (NULL_PTR);
3196 /* Clear the bits that don't belong in our mode,
3197 unless they and our sign bit are all one.
3198 So we get either a reasonable negative value or a reasonable
3199 unsigned value for this mode. */
3200 if (width < HOST_BITS_PER_WIDE_INT
3201 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3202 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3203 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3205 return GEN_INT (val);
3208 /* This was formerly used only for non-IEEE float.
3209 eggert@twinsun.com says it is safe for IEEE also. */
3212 /* There are some simplifications we can do even if the operands
3218 /* (not (not X)) == X, similarly for NEG. */
3219 if (GET_CODE (op) == code)
3220 return XEXP (op, 0);
3224 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3225 becomes just the MINUS if its mode is MODE. This allows
3226 folding switch statements on machines using casesi (such as
3228 if (GET_CODE (op) == TRUNCATE
3229 && GET_MODE (XEXP (op, 0)) == mode
3230 && GET_CODE (XEXP (op, 0)) == MINUS
3231 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3232 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3233 return XEXP (op, 0);
3241 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3242 and OP1. Return 0 if no simplification is possible.
3244 Don't use this for relational operations such as EQ or LT.
3245 Use simplify_relational_operation instead. */
3248 simplify_binary_operation (code, mode, op0, op1)
3250 enum machine_mode mode;
3253 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3255 int width = GET_MODE_BITSIZE (mode);
3258 /* Relational operations don't work here. We must know the mode
3259 of the operands in order to do the comparison correctly.
3260 Assuming a full word can give incorrect results.
3261 Consider comparing 128 with -128 in QImode. */
3263 if (GET_RTX_CLASS (code) == '<')
3266 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3267 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3268 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3269 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3271 REAL_VALUE_TYPE f0, f1, value;
3274 if (setjmp (handler))
3277 set_float_handler (handler);
3279 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3280 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3281 f0 = real_value_truncate (mode, f0);
3282 f1 = real_value_truncate (mode, f1);
3284 #ifdef REAL_ARITHMETIC
3285 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3299 #ifndef REAL_INFINITY
3306 value = MIN (f0, f1);
3309 value = MAX (f0, f1);
3316 set_float_handler (NULL_PTR);
3317 value = real_value_truncate (mode, value);
3318 return immed_real_const_1 (value, mode);
3320 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3322 /* We can fold some multi-word operations. */
3323 if (GET_MODE_CLASS (mode) == MODE_INT
3324 && width == HOST_BITS_PER_WIDE_INT * 2
3325 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3326 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3328 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3330 if (GET_CODE (op0) == CONST_DOUBLE)
3331 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3333 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3335 if (GET_CODE (op1) == CONST_DOUBLE)
3336 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3338 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3343 /* A - B == A + (-B). */
3344 neg_double (l2, h2, &lv, &hv);
3347 /* .. fall through ... */
3350 add_double (l1, h1, l2, h2, &lv, &hv);
3354 mul_double (l1, h1, l2, h2, &lv, &hv);
3357 case DIV: case MOD: case UDIV: case UMOD:
3358 /* We'd need to include tree.h to do this and it doesn't seem worth
3363 lv = l1 & l2, hv = h1 & h2;
3367 lv = l1 | l2, hv = h1 | h2;
3371 lv = l1 ^ l2, hv = h1 ^ h2;
3377 && ((unsigned HOST_WIDE_INT) l1
3378 < (unsigned HOST_WIDE_INT) l2)))
3387 && ((unsigned HOST_WIDE_INT) l1
3388 > (unsigned HOST_WIDE_INT) l2)))
3395 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3397 && ((unsigned HOST_WIDE_INT) l1
3398 < (unsigned HOST_WIDE_INT) l2)))
3405 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3407 && ((unsigned HOST_WIDE_INT) l1
3408 > (unsigned HOST_WIDE_INT) l2)))
3414 case LSHIFTRT: case ASHIFTRT:
3415 case ASHIFT: case LSHIFT:
3416 case ROTATE: case ROTATERT:
3417 #ifdef SHIFT_COUNT_TRUNCATED
3418 if (SHIFT_COUNT_TRUNCATED)
3419 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3422 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3425 if (code == LSHIFTRT || code == ASHIFTRT)
3426 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3428 else if (code == ASHIFT || code == LSHIFT)
3429 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3431 else if (code == ROTATE)
3432 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3433 else /* code == ROTATERT */
3434 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3441 return immed_double_const (lv, hv, mode);
3444 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3445 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3447 /* Even if we can't compute a constant result,
3448 there are some cases worth simplifying. */
3453 /* In IEEE floating point, x+0 is not the same as x. Similarly
3454 for the other optimizations below. */
3455 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3456 && FLOAT_MODE_P (mode))
3459 if (op1 == CONST0_RTX (mode))
3462 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3463 if (GET_CODE (op0) == NEG)
3464 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3465 else if (GET_CODE (op1) == NEG)
3466 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3468 /* Handle both-operands-constant cases. We can only add
3469 CONST_INTs to constants since the sum of relocatable symbols
3470 can't be handled by most assemblers. Don't add CONST_INT
3471 to CONST_INT since overflow won't be computed properly if wider
3472 than HOST_BITS_PER_WIDE_INT. */
3474 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3475 && GET_CODE (op1) == CONST_INT)
3476 return plus_constant (op0, INTVAL (op1));
3477 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3478 && GET_CODE (op0) == CONST_INT)
3479 return plus_constant (op1, INTVAL (op0));
3481 /* See if this is something like X * C - X or vice versa or
3482 if the multiplication is written as a shift. If so, we can
3483 distribute and make a new multiply, shift, or maybe just
3484 have X (if C is 2 in the example above). But don't make
3485 real multiply if we didn't have one before. */
3487 if (! FLOAT_MODE_P (mode))
3489 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3490 rtx lhs = op0, rhs = op1;
3493 if (GET_CODE (lhs) == NEG)
3494 coeff0 = -1, lhs = XEXP (lhs, 0);
3495 else if (GET_CODE (lhs) == MULT
3496 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3498 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3501 else if (GET_CODE (lhs) == ASHIFT
3502 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3503 && INTVAL (XEXP (lhs, 1)) >= 0
3504 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3506 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3507 lhs = XEXP (lhs, 0);
3510 if (GET_CODE (rhs) == NEG)
3511 coeff1 = -1, rhs = XEXP (rhs, 0);
3512 else if (GET_CODE (rhs) == MULT
3513 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3515 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3518 else if (GET_CODE (rhs) == ASHIFT
3519 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3520 && INTVAL (XEXP (rhs, 1)) >= 0
3521 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3523 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3524 rhs = XEXP (rhs, 0);
3527 if (rtx_equal_p (lhs, rhs))
3529 tem = cse_gen_binary (MULT, mode, lhs,
3530 GEN_INT (coeff0 + coeff1));
3531 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3535 /* If one of the operands is a PLUS or a MINUS, see if we can
3536 simplify this by the associative law.
3537 Don't use the associative law for floating point.
3538 The inaccuracy makes it nonassociative,
3539 and subtle programs can break if operations are associated. */
3541 if (INTEGRAL_MODE_P (mode)
3542 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3543 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3544 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3550 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3551 using cc0, in which case we want to leave it as a COMPARE
3552 so we can distinguish it from a register-register-copy.
3554 In IEEE floating point, x-0 is not the same as x. */
3556 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3557 || ! FLOAT_MODE_P (mode))
3558 && op1 == CONST0_RTX (mode))
3561 /* Do nothing here. */
3566 /* None of these optimizations can be done for IEEE
3568 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3569 && FLOAT_MODE_P (mode))
3572 /* We can't assume x-x is 0 even with non-IEEE floating point. */
3573 if (rtx_equal_p (op0, op1)
3574 && ! side_effects_p (op0)
3575 && ! FLOAT_MODE_P (mode))
3578 /* Change subtraction from zero into negation. */
3579 if (op0 == CONST0_RTX (mode))
3580 return gen_rtx (NEG, mode, op1);
3582 /* (-1 - a) is ~a. */
3583 if (op0 == constm1_rtx)
3584 return gen_rtx (NOT, mode, op1);
3586 /* Subtracting 0 has no effect. */
3587 if (op1 == CONST0_RTX (mode))
3590 /* See if this is something like X * C - X or vice versa or
3591 if the multiplication is written as a shift. If so, we can
3592 distribute and make a new multiply, shift, or maybe just
3593 have X (if C is 2 in the example above). But don't make
3594 real multiply if we didn't have one before. */
3596 if (! FLOAT_MODE_P (mode))
3598 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3599 rtx lhs = op0, rhs = op1;
3602 if (GET_CODE (lhs) == NEG)
3603 coeff0 = -1, lhs = XEXP (lhs, 0);
3604 else if (GET_CODE (lhs) == MULT
3605 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3607 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3610 else if (GET_CODE (lhs) == ASHIFT
3611 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3612 && INTVAL (XEXP (lhs, 1)) >= 0
3613 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3615 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3616 lhs = XEXP (lhs, 0);
3619 if (GET_CODE (rhs) == NEG)
3620 coeff1 = - 1, rhs = XEXP (rhs, 0);
3621 else if (GET_CODE (rhs) == MULT
3622 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3624 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3627 else if (GET_CODE (rhs) == ASHIFT
3628 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3629 && INTVAL (XEXP (rhs, 1)) >= 0
3630 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3632 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3633 rhs = XEXP (rhs, 0);
3636 if (rtx_equal_p (lhs, rhs))
3638 tem = cse_gen_binary (MULT, mode, lhs,
3639 GEN_INT (coeff0 - coeff1));
3640 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3644 /* (a - (-b)) -> (a + b). */
3645 if (GET_CODE (op1) == NEG)
3646 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3648 /* If one of the operands is a PLUS or a MINUS, see if we can
3649 simplify this by the associative law.
3650 Don't use the associative law for floating point.
3651 The inaccuracy makes it nonassociative,
3652 and subtle programs can break if operations are associated. */
3654 if (INTEGRAL_MODE_P (mode)
3655 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3656 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3657 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3660 /* Don't let a relocatable value get a negative coeff. */
3661 if (GET_CODE (op1) == CONST_INT && GET_MODE (op1) != VOIDmode)
3662 return plus_constant (op0, - INTVAL (op1));
3666 if (op1 == constm1_rtx)
3668 tem = simplify_unary_operation (NEG, mode, op0, mode);
3670 return tem ? tem : gen_rtx (NEG, mode, op0);
3673 /* In IEEE floating point, x*0 is not always 0. */
3674 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3675 && ! FLOAT_MODE_P (mode))
3676 && op1 == CONST0_RTX (mode)
3677 && ! side_effects_p (op0))
3680 /* In IEEE floating point, x*1 is not equivalent to x for nans.
3681 However, ANSI says we can drop signals,
3682 so we can do this anyway. */
3683 if (op1 == CONST1_RTX (mode))
3686 /* Convert multiply by constant power of two into shift. */
3687 if (GET_CODE (op1) == CONST_INT
3688 && (val = exact_log2 (INTVAL (op1))) >= 0)
3689 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
3691 if (GET_CODE (op1) == CONST_DOUBLE
3692 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3696 int op1is2, op1ism1;
3698 if (setjmp (handler))
3701 set_float_handler (handler);
3702 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3703 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3704 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3705 set_float_handler (NULL_PTR);
3707 /* x*2 is x+x and x*(-1) is -x */
3708 if (op1is2 && GET_MODE (op0) == mode)
3709 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3711 else if (op1ism1 && GET_MODE (op0) == mode)
3712 return gen_rtx (NEG, mode, op0);
3717 if (op1 == const0_rtx)
3719 if (GET_CODE (op1) == CONST_INT
3720 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3722 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3724 /* A | (~A) -> -1 */
3725 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3726 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3727 && ! side_effects_p (op0)
3728 && GET_MODE_CLASS (mode) != MODE_CC)
3733 if (op1 == const0_rtx)
3735 if (GET_CODE (op1) == CONST_INT
3736 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3737 return gen_rtx (NOT, mode, op0);
3738 if (op0 == op1 && ! side_effects_p (op0)
3739 && GET_MODE_CLASS (mode) != MODE_CC)
3744 if (op1 == const0_rtx && ! side_effects_p (op0))
3746 if (GET_CODE (op1) == CONST_INT
3747 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3749 if (op0 == op1 && ! side_effects_p (op0)
3750 && GET_MODE_CLASS (mode) != MODE_CC)
3753 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3754 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3755 && ! side_effects_p (op0)
3756 && GET_MODE_CLASS (mode) != MODE_CC)
3761 /* Convert divide by power of two into shift (divide by 1 handled
3763 if (GET_CODE (op1) == CONST_INT
3764 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3765 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3767 /* ... fall through ... */
3770 if (op1 == CONST1_RTX (mode))
3773 /* In IEEE floating point, 0/x is not always 0. */
3774 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3775 || ! FLOAT_MODE_P (mode))
3776 && op0 == CONST0_RTX (mode)
3777 && ! side_effects_p (op1))
3780 #if 0 /* Turned off till an expert says this is a safe thing to do. */
3781 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3782 /* Change division by a constant into multiplication. */
3783 else if (GET_CODE (op1) == CONST_DOUBLE
3784 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
3785 && op1 != CONST0_RTX (mode))
3788 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3789 if (REAL_VALUES_EQUAL (d, dconst0))
3791 #if defined (REAL_ARITHMETIC)
3792 REAL_ARITHMETIC (d, (int) RDIV_EXPR, dconst1, d);
3793 return gen_rtx (MULT, mode, op0,
3794 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
3796 return gen_rtx (MULT, mode, op0,
3797 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
3805 /* Handle modulus by power of two (mod with 1 handled below). */
3806 if (GET_CODE (op1) == CONST_INT
3807 && exact_log2 (INTVAL (op1)) > 0)
3808 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
3810 /* ... fall through ... */
3813 if ((op0 == const0_rtx || op1 == const1_rtx)
3814 && ! side_effects_p (op0) && ! side_effects_p (op1))
3820 /* Rotating ~0 always results in ~0. */
3821 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
3822 && INTVAL (op0) == GET_MODE_MASK (mode)
3823 && ! side_effects_p (op1))
3826 /* ... fall through ... */
3832 if (op1 == const0_rtx)
3834 if (op0 == const0_rtx && ! side_effects_p (op1))
3839 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
3840 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
3841 && ! side_effects_p (op0))
3843 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3848 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
3850 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
3851 && ! side_effects_p (op0))
3853 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3858 if (op1 == const0_rtx && ! side_effects_p (op0))
3860 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3865 if (op1 == constm1_rtx && ! side_effects_p (op0))
3867 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3878 /* Get the integer argument values in two forms:
3879 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3881 arg0 = INTVAL (op0);
3882 arg1 = INTVAL (op1);
3884 if (width < HOST_BITS_PER_WIDE_INT)
3886 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3887 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3890 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3891 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3894 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3895 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3903 /* Compute the value of the arithmetic. */
3908 val = arg0s + arg1s;
3912 val = arg0s - arg1s;
3916 val = arg0s * arg1s;
3922 val = arg0s / arg1s;
3928 val = arg0s % arg1s;
3934 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3940 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3956 /* If shift count is undefined, don't fold it; let the machine do
3957 what it wants. But truncate it if the machine will do that. */
3961 #ifdef SHIFT_COUNT_TRUNCATED
3962 if (SHIFT_COUNT_TRUNCATED)
3966 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
3974 #ifdef SHIFT_COUNT_TRUNCATED
3975 if (SHIFT_COUNT_TRUNCATED)
3979 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
3986 #ifdef SHIFT_COUNT_TRUNCATED
3987 if (SHIFT_COUNT_TRUNCATED)
3991 val = arg0s >> arg1;
3993 /* Bootstrap compiler may not have sign extended the right shift.
3994 Manually extend the sign to insure bootstrap cc matches gcc. */
3995 if (arg0s < 0 && arg1 > 0)
3996 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4005 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4006 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4014 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4015 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4019 /* Do nothing here. */
4023 val = arg0s <= arg1s ? arg0s : arg1s;
4027 val = ((unsigned HOST_WIDE_INT) arg0
4028 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4032 val = arg0s > arg1s ? arg0s : arg1s;
4036 val = ((unsigned HOST_WIDE_INT) arg0
4037 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4044 /* Clear the bits that don't belong in our mode, unless they and our sign
4045 bit are all one. So we get either a reasonable negative value or a
4046 reasonable unsigned value for this mode. */
4047 if (width < HOST_BITS_PER_WIDE_INT
4048 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4049 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4050 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4052 return GEN_INT (val);
4055 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4058 Rather than test for specific case, we do this by a brute-force method
4059 and do all possible simplifications until no more changes occur. Then
4060 we rebuild the operation. */
/* Flattens a PLUS/MINUS tree into a flat list of operands in OPS[],
   with a parallel NEGS[] array recording which operands are negated,
   simplifies operand pairs against each other, and rebuilds the result
   with cse_gen_binary.  Returns 0 when no simplification was found.
   NOTE(review): this excerpt elides several original source lines (the
   embedded line numbers jump), so some declarations, braces, and case
   labels of this function are not visible here — confirm against the
   full file before relying on structural details.  */
4063 simplify_plus_minus (code, mode, op0, op1)
4065 enum machine_mode mode;
4071 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4072 int first = 1, negate = 0, changed;
4075 bzero (ops, sizeof ops);
4077 /* Set up the two operands and then expand them until nothing has been
4078 changed. If we run out of room in our array, give up; this should
4079 almost never happen. */
/* A MINUS at the top level is represented by marking op1 negated.  */
4081 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
/* Expansion pass: split nested PLUS/MINUS into two slots, strip NEG by
   flipping the negation flag, and rewrite NOT as (-x - 1).  */
4088 for (i = 0; i < n_ops; i++)
4089 switch (GET_CODE (ops[i]))
4096 ops[n_ops] = XEXP (ops[i], 1);
/* The second operand of a MINUS enters with its sign flipped.  */
4097 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4098 ops[i] = XEXP (ops[i], 0);
4104 ops[i] = XEXP (ops[i], 0);
4105 negs[i] = ! negs[i];
4110 ops[i] = XEXP (ops[i], 0);
4116 /* ~a -> (-a - 1) */
4119 ops[n_ops] = constm1_rtx;
4120 negs[n_ops++] = negs[i];
4121 ops[i] = XEXP (ops[i], 0);
4122 negs[i] = ! negs[i];
/* A negated CONST_INT is canonicalized by negating its value.  */
4129 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4134 /* If we only have two operands, we can't do anything. */
4138 /* Now simplify each pair of operands until nothing changes. The first
4139 time through just simplify constants against each other. */
/* Pairwise pass: try simplify_binary_operation on every live pair,
   choosing PLUS or MINUS according to the pair's negation flags.  */
4146 for (i = 0; i < n_ops - 1; i++)
4147 for (j = i + 1; j < n_ops; j++)
4148 if (ops[i] != 0 && ops[j] != 0
4149 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4151 rtx lhs = ops[i], rhs = ops[j];
4152 enum rtx_code ncode = PLUS;
4154 if (negs[i] && ! negs[j])
4155 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4156 else if (! negs[i] && negs[j])
4159 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
/* On success the combined value replaces slot i; slot j dies.  */
4162 ops[i] = tem, ops[j] = 0;
4163 negs[i] = negs[i] && negs[j];
4164 if (GET_CODE (tem) == NEG)
4165 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4167 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4168 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4176 /* Pack all the operands to the lower-numbered entries and give up if
4177 we didn't reduce the number of operands we had. Make sure we
4178 count a CONST as two operands. If we have the same number of
4179 operands, but have made more CONSTs than we had, this is also
4180 an improvement, so accept it. */
4182 for (i = 0, j = 0; j < n_ops; j++)
4185 ops[i] = ops[j], negs[i++] = negs[j];
4186 if (GET_CODE (ops[j]) == CONST)
/* Reject the result if it is not strictly simpler than the input.  */
4190 if (i + n_consts > input_ops
4191 || (i + n_consts == input_ops && n_consts <= input_consts))
4196 /* If we have a CONST_INT, put it last. */
4197 for (i = 0; i < n_ops - 1; i++)
4198 if (GET_CODE (ops[i]) == CONST_INT)
4200 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
4201 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4204 /* Put a non-negated operand first. If there aren't any, make all
4205 operands positive and negate the whole thing later. */
4206 for (i = 0; i < n_ops && negs[i]; i++)
4211 for (i = 0; i < n_ops; i++)
4217 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4218 j = negs[0], negs[0] = negs[i], negs[i] = j;
4221 /* Now make the result by performing the requested operations. */
/* Left-fold the operand list back into a PLUS/MINUS chain.  */
4223 for (i = 1; i < n_ops; i++)
4224 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4226 return negate ? gen_rtx (NEG, mode, result) : result;
4229 /* Make a binary operation by properly ordering the operands and
4230 seeing if the expression folds. */
/* Builds (CODE:MODE OP0 OP1): canonicalizes commutative operand order,
   tries constant folding via simplify_binary_operation, special-cases
   PLUS/MINUS of a CONST_INT through plus_constant, and otherwise emits
   the rtx directly.
   NOTE(review): some lines of this function (declarations and the
   "if it folded, return it" check) are elided in this excerpt.  */
4233 cse_gen_binary (code, mode, op0, op1)
4235 enum machine_mode mode;
4240 /* Put complex operands first and constants second if commutative. */
/* 'c' = commutative binary rtx class; 'o' = object (reg/mem/const).
   Swap so the more complex expression ends up as operand 0.  */
4241 if (GET_RTX_CLASS (code) == 'c'
4242 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4243 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4244 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4245 || (GET_CODE (op0) == SUBREG
4246 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4247 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4248 tem = op0, op0 = op1, op1 = tem;
4250 /* If this simplifies, do it. */
4251 tem = simplify_binary_operation (code, mode, op0, op1);
4256 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4257 just form the operation. */
/* plus_constant knows how to merge a constant into an existing
   address-like expression; skip it for VOIDmode (bare constant) op0.  */
4259 if (code == PLUS && GET_CODE (op1) == CONST_INT
4260 && GET_MODE (op0) != VOIDmode)
4261 return plus_constant (op0, INTVAL (op1));
4262 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4263 && GET_MODE (op0) != VOIDmode)
4264 return plus_constant (op0, - INTVAL (op1));
4266 return gen_rtx (code, mode, op0, op1);
4269 /* Like simplify_binary_operation except used for relational operators.
4270 MODE is the mode of the operands, not that of the result. */
/* Attempts to fold a comparison (EQ, NE, LT, ... with code CODE) of OP0
   and OP1 to a constant (const_true_rtx / const0_rtx, or STORE_FLAG_VALUE
   for the CONST_INT path).  Returns 0 when no folding is possible.
   NOTE(review): many lines (case labels, braces, early returns) are
   elided in this excerpt — the switch structure must be read from the
   full file.  */
4273 simplify_relational_operation (code, mode, op0, op1)
4275 enum machine_mode mode;
4278 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
4280 int width = GET_MODE_BITSIZE (mode);
4282 /* If op0 is a compare, extract the comparison arguments from it. */
4283 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4284 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4286 /* What to do with MODE_CC isn't clear yet.
4287 Let's make sure nothing erroneous is done. */
4288 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4291 /* Unlike the arithmetic operations, we can do the comparison whether
4292 or not WIDTH is larger than HOST_BITS_PER_WIDE_INT because the
4293 CONST_INTs are to be understood as being infinite precision as
4294 is the comparison. So there is no question of overflow. */
/* Non-constant (or zero-width) operands: only structural folds apply.  */
4296 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT || width == 0)
4298 /* Even if we can't compute a constant result,
4299 there are some cases worth simplifying. */
4301 /* For non-IEEE floating-point, if the two operands are equal, we know
4303 if (rtx_equal_p (op0, op1)
4304 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4305 || ! FLOAT_MODE_P (GET_MODE (op0))))
/* x OP x is true exactly for the reflexive comparison codes.  */
4306 return (code == EQ || code == GE || code == LE || code == LEU
4307 || code == GEU) ? const_true_rtx : const0_rtx;
4309 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Two floating constants: compare them in REAL_VALUE_TYPE arithmetic,
   guarding against traps with setjmp/set_float_handler.  */
4310 else if (GET_CODE (op0) == CONST_DOUBLE
4311 && GET_CODE (op1) == CONST_DOUBLE
4312 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4314 REAL_VALUE_TYPE d0, d1;
4316 int op0lt, op1lt, equal;
4318 if (setjmp (handler))
4321 set_float_handler (handler);
4322 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4323 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4324 equal = REAL_VALUES_EQUAL (d0, d1);
4325 op0lt = REAL_VALUES_LESS (d0, d1);
4326 op1lt = REAL_VALUES_LESS (d1, d0);
4327 set_float_handler (NULL_PTR);
/* Per-code results (the case labels are elided in this excerpt).  */
4332 return equal ? const_true_rtx : const0_rtx;
4334 return !equal ? const_true_rtx : const0_rtx;
4336 return equal || op0lt ? const_true_rtx : const0_rtx;
4338 return op0lt ? const_true_rtx : const0_rtx;
4340 return equal || op1lt ? const_true_rtx : const0_rtx;
4342 return op1lt ? const_true_rtx : const0_rtx;
4345 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* Wide integer constants (wider than a host word): compare the
   high/low word pairs, both signed and unsigned.  */
4347 else if (GET_MODE_CLASS (mode) == MODE_INT
4348 && width > HOST_BITS_PER_WIDE_INT
4349 && (GET_CODE (op0) == CONST_DOUBLE
4350 || GET_CODE (op0) == CONST_INT)
4351 && (GET_CODE (op1) == CONST_DOUBLE
4352 || GET_CODE (op1) == CONST_INT))
4354 HOST_WIDE_INT h0, l0, h1, l1;
4355 unsigned HOST_WIDE_INT uh0, ul0, uh1, ul1;
4356 int op0lt, op0ltu, equal;
/* A CONST_INT is sign-extended into the high word.  */
4358 if (GET_CODE (op0) == CONST_DOUBLE)
4359 l0 = CONST_DOUBLE_LOW (op0), h0 = CONST_DOUBLE_HIGH (op0);
4361 l0 = INTVAL (op0), h0 = l0 < 0 ? -1 : 0;
4363 if (GET_CODE (op1) == CONST_DOUBLE)
4364 l1 = CONST_DOUBLE_LOW (op1), h1 = CONST_DOUBLE_HIGH (op1);
4366 l1 = INTVAL (op1), h1 = l1 < 0 ? -1 : 0;
4368 uh0 = h0, ul0 = l0, uh1 = h1, ul1 = l1;
4370 equal = (h0 == h1 && l0 == l1);
4371 op0lt = (h0 < h1 || (h0 == h1 && l0 < l1));
4372 op0ltu = (uh0 < uh1 || (uh0 == uh1 && ul0 < ul1));
/* Per-code results (case labels elided in this excerpt).  */
4377 return equal ? const_true_rtx : const0_rtx;
4379 return !equal ? const_true_rtx : const0_rtx;
4381 return equal || op0lt ? const_true_rtx : const0_rtx;
4383 return op0lt ? const_true_rtx : const0_rtx;
4385 return !op0lt ? const_true_rtx : const0_rtx;
4387 return !equal && !op0lt ? const_true_rtx : const0_rtx;
4389 return equal || op0ltu ? const_true_rtx : const0_rtx;
4391 return op0ltu ? const_true_rtx : const0_rtx;
4393 return !op0ltu ? const_true_rtx : const0_rtx;
4395 return !equal && !op0ltu ? const_true_rtx : const0_rtx;
/* Comparisons against zero of known-nonzero addresses.  A symbol's
   address cannot be assumed nonzero because of #pragma weak.  */
4404 /* We can't make this assumption due to #pragma weak */
4405 if (CONSTANT_P (op0) && op1 == const0_rtx)
4408 if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx
4409 /* On some machines, the ap reg can be 0 sometimes. */
4410 && op0 != arg_pointer_rtx)
4417 /* We can't make this assumption due to #pragma weak */
4418 if (CONSTANT_P (op0) && op1 == const0_rtx)
4419 return const_true_rtx;
4421 if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx
4422 /* On some machines, the ap reg can be 0 sometimes. */
4423 && op0 != arg_pointer_rtx)
4424 return const_true_rtx;
4428 /* Unsigned values are never negative, but we must be sure we are
4429 actually comparing a value, not a CC operand. */
/* presumably these paired tests belong to GEU/LTU-style case labels
   that are elided here — confirm against the full file.  */
4430 if (op1 == const0_rtx && INTEGRAL_MODE_P (mode))
4431 return const_true_rtx;
4435 if (op1 == const0_rtx && INTEGRAL_MODE_P (mode))
4440 /* Unsigned values are never greater than the largest
4442 if (GET_CODE (op1) == CONST_INT
4443 && INTVAL (op1) == GET_MODE_MASK (mode)
4444 && INTEGRAL_MODE_P (mode))
4445 return const_true_rtx;
4449 if (GET_CODE (op1) == CONST_INT
4450 && INTVAL (op1) == GET_MODE_MASK (mode)
4451 && INTEGRAL_MODE_P (mode))
4459 /* Get the integer argument values in two forms:
4460 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4462 arg0 = INTVAL (op0);
4463 arg1 = INTVAL (op1);
4465 if (width < HOST_BITS_PER_WIDE_INT)
4467 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4468 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
/* Manually sign-extend from bit (width-1) into the full host word.  */
4471 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4472 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4475 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4476 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4484 /* Compute the value of the arithmetic. */
/* Signed codes use arg0s/arg1s, unsigned codes use arg0/arg1
   (case labels elided in this excerpt).  */
4489 val = arg0 != arg1 ? STORE_FLAG_VALUE : 0;
4493 val = arg0 == arg1 ? STORE_FLAG_VALUE : 0;
4497 val = arg0s <= arg1s ? STORE_FLAG_VALUE : 0;
4501 val = arg0s < arg1s ? STORE_FLAG_VALUE : 0;
4505 val = arg0s >= arg1s ? STORE_FLAG_VALUE : 0;
4509 val = arg0s > arg1s ? STORE_FLAG_VALUE : 0;
4513 val = (((unsigned HOST_WIDE_INT) arg0)
4514 <= ((unsigned HOST_WIDE_INT) arg1) ? STORE_FLAG_VALUE : 0);
4518 val = (((unsigned HOST_WIDE_INT) arg0)
4519 < ((unsigned HOST_WIDE_INT) arg1) ? STORE_FLAG_VALUE : 0);
4523 val = (((unsigned HOST_WIDE_INT) arg0)
4524 >= ((unsigned HOST_WIDE_INT) arg1) ? STORE_FLAG_VALUE : 0);
4528 val = (((unsigned HOST_WIDE_INT) arg0)
4529 > ((unsigned HOST_WIDE_INT) arg1) ? STORE_FLAG_VALUE : 0);
4536 /* Clear the bits that don't belong in our mode, unless they and our sign
4537 bit are all one. So we get either a reasonable negative value or a
4538 reasonable unsigned value for this mode. */
4539 if (width < HOST_BITS_PER_WIDE_INT
4540 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4541 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4542 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4544 return GEN_INT (val);
4547 /* Simplify CODE, an operation with result mode MODE and three operands,
4548 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4549 a constant. Return 0 if no simplifications is possible. */
/* Visible cases in this excerpt: extracting a constant bit-field
   (SIGN_EXTRACT/ZERO_EXTRACT-style, with OP1 = field width and
   OP2 = field position) and selecting between OP1/OP2 on a constant
   condition (IF_THEN_ELSE-style).
   NOTE(review): the case labels, closing braces, and the final
   "return 0" are elided in this excerpt — confirm structure against
   the full file.  */
4552 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4554 enum machine_mode mode, op0_mode;
4557 int width = GET_MODE_BITSIZE (mode);
4559 /* VOIDmode means "infinite" precision. */
4561 width = HOST_BITS_PER_WIDE_INT;
/* All three operands constant and the field fits in OP0's mode.  */
4567 if (GET_CODE (op0) == CONST_INT
4568 && GET_CODE (op1) == CONST_INT
4569 && GET_CODE (op2) == CONST_INT
4570 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4571 && width <= HOST_BITS_PER_WIDE_INT)
4573 /* Extracting a bit-field from a constant */
4574 HOST_WIDE_INT val = INTVAL (op0);
/* Big-endian bit numbering counts the position from the other end
   (the #if/#else around these two shifts is elided here).  */
4577 val >>= (GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1));
4579 val >>= INTVAL (op2);
4581 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4583 /* First zero-extend. */
4584 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4585 /* If desired, propagate sign bit. */
4586 if (code == SIGN_EXTRACT
4587 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4588 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4591 /* Clear the bits that don't belong in our mode,
4592 unless they and our sign bit are all one.
4593 So we get either a reasonable negative value or a reasonable
4594 unsigned value for this mode. */
4595 if (width < HOST_BITS_PER_WIDE_INT
4596 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4597 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4598 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4600 return GEN_INT (val);
/* Constant condition: select the then-arm (OP1) or else-arm (OP2).  */
4605 if (GET_CODE (op0) == CONST_INT)
4606 return op0 != const0_rtx ? op1 : op2;
4616 /* If X is a nontrivial arithmetic operation on an argument
4617 for which a constant value can be determined, return
4618 the result of operating on that value, as a constant.
4619 Otherwise, return X, possibly with one or more operands
4620 modified by recursive calls to this function.
4622 If X is a register whose contents are known, we do NOT
4623 return those contents here. equiv_constant is called to
4626 INSN is the insn that we may be modifying. If it is 0, make a copy
4627 of X before modifying it. */
4634 register enum rtx_code code;
4635 register enum machine_mode mode;
4642 /* Folded equivalents of first two operands of X. */
4646 /* Constant equivalents of first three operands of X;
4647 0 when no such equivalent is known. */
4652 /* The mode of the first operand of X. We need this for sign and zero
4654 enum machine_mode mode_arg0;
4659 mode = GET_MODE (x);
4660 code = GET_CODE (x);
4669 /* No use simplifying an EXPR_LIST
4670 since they are used only for lists of args
4671 in a function call's REG_EQUAL note. */
4677 return prev_insn_cc0;
4681 /* If the next insn is a CODE_LABEL followed by a jump table,
4682 PC's value is a LABEL_REF pointing to that label. That
4683 lets us fold switch statements on the Vax. */
4684 if (insn && GET_CODE (insn) == JUMP_INSN)
4686 rtx next = next_nonnote_insn (insn);
4688 if (next && GET_CODE (next) == CODE_LABEL
4689 && NEXT_INSN (next) != 0
4690 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4691 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4692 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4693 return gen_rtx (LABEL_REF, Pmode, next);
4698 /* See if we previously assigned a constant value to this SUBREG. */
4699 if ((new = lookup_as_function (x, CONST_INT)) != 0
4700 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4703 /* If this is a paradoxical SUBREG, we have no idea what value the
4704 extra bits would have. However, if the operand is equivalent
4705 to a SUBREG whose operand is the same as our mode, and all the
4706 modes are within a word, we can just use the inner operand
4707 because these SUBREGs just say how to treat the register.
4709 Similarly if we find an integer constant. */
4711 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4713 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4714 struct table_elt *elt;
4716 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4717 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4718 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4720 for (elt = elt->first_same_value;
4721 elt; elt = elt->next_same_value)
4723 if (CONSTANT_P (elt->exp)
4724 && GET_MODE (elt->exp) == VOIDmode)
4727 if (GET_CODE (elt->exp) == SUBREG
4728 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4729 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4730 return copy_rtx (SUBREG_REG (elt->exp));
4736 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4737 We might be able to if the SUBREG is extracting a single word in an
4738 integral mode or extracting the low part. */
4740 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4741 const_arg0 = equiv_constant (folded_arg0);
4743 folded_arg0 = const_arg0;
4745 if (folded_arg0 != SUBREG_REG (x))
4749 if (GET_MODE_CLASS (mode) == MODE_INT
4750 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4751 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4752 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4753 GET_MODE (SUBREG_REG (x)));
4754 if (new == 0 && subreg_lowpart_p (x))
4755 new = gen_lowpart_if_possible (mode, folded_arg0);
4760 /* If this is a narrowing SUBREG and our operand is a REG, see if
4761 we can find an equivalence for REG that is an arithmetic operation
4762 in a wider mode where both operands are paradoxical SUBREGs
4763 from objects of our result mode. In that case, we couldn't report
4764 an equivalent value for that operation, since we don't know what the
4765 extra bits will be. But we can find an equivalence for this SUBREG
4766 by folding that operation in the narrow mode. This allows us to
4767 fold arithmetic in narrow modes when the machine only supports
4768 word-sized arithmetic.
4770 Also look for a case where we have a SUBREG whose operand is the
4771 same as our result. If both modes are smaller than a word, we
4772 are simply interpreting a register in different modes and we
4773 can use the inner value. */
4775 if (GET_CODE (folded_arg0) == REG
4776 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4777 && subreg_lowpart_p (x))
4779 struct table_elt *elt;
4781 /* We can use HASH here since we know that canon_hash won't be
4783 elt = lookup (folded_arg0,
4784 HASH (folded_arg0, GET_MODE (folded_arg0)),
4785 GET_MODE (folded_arg0));
4788 elt = elt->first_same_value;
4790 for (; elt; elt = elt->next_same_value)
4792 enum rtx_code eltcode = GET_CODE (elt->exp);
4794 /* Just check for unary and binary operations. */
4795 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4796 && GET_CODE (elt->exp) != SIGN_EXTEND
4797 && GET_CODE (elt->exp) != ZERO_EXTEND
4798 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4799 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4801 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4803 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4804 op0 = fold_rtx (op0, NULL_RTX);
4806 op0 = equiv_constant (op0);
4808 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4811 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4812 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4813 && eltcode != DIV && eltcode != MOD
4814 && eltcode != UDIV && eltcode != UMOD
4815 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4816 && eltcode != ROTATE && eltcode != ROTATERT
4817 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4818 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
4820 || CONSTANT_P (XEXP (elt->exp, 0)))
4821 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
4822 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
4824 || CONSTANT_P (XEXP (elt->exp, 1))))
4826 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
4827 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
4829 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4830 op0 = fold_rtx (op0, NULL_RTX);
4833 op0 = equiv_constant (op0);
4835 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
4836 op1 = fold_rtx (op1, NULL_RTX);
4839 op1 = equiv_constant (op1);
4841 /* If we are looking for the low SImode part of
4842 (ashift:DI c (const_int 32)), it doesn't work
4843 to compute that in SImode, because a 32-bit shift
4844 in SImode is unpredictable. We know the value is 0. */
4846 && (GET_CODE (elt->exp) == ASHIFT
4847 || GET_CODE (elt->exp) == LSHIFT)
4848 && GET_CODE (op1) == CONST_INT
4849 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
4851 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
4853 /* If the count fits in the inner mode's width,
4854 but exceeds the outer mode's width,
4855 the value will get truncated to 0
4859 /* If the count exceeds even the inner mode's width,
4860 don't fold this expression. */
4863 else if (op0 && op1)
4864 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
4868 else if (GET_CODE (elt->exp) == SUBREG
4869 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4870 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
4872 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4873 new = copy_rtx (SUBREG_REG (elt->exp));
4884 /* If we have (NOT Y), see if Y is known to be (NOT Z).
4885 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
4886 new = lookup_as_function (XEXP (x, 0), code);
4888 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
4892 /* If we are not actually processing an insn, don't try to find the
4893 best address. Not only don't we care, but we could modify the
4894 MEM in an invalid way since we have no insn to validate against. */
4896 find_best_addr (insn, &XEXP (x, 0));
4899 /* Even if we don't fold in the insn itself,
4900 we can safely do so here, in hopes of getting a constant. */
4901 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
4903 HOST_WIDE_INT offset = 0;
4905 if (GET_CODE (addr) == REG
4906 && REGNO_QTY_VALID_P (REGNO (addr))
4907 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
4908 && qty_const[reg_qty[REGNO (addr)]] != 0)
4909 addr = qty_const[reg_qty[REGNO (addr)]];
4911 /* If address is constant, split it into a base and integer offset. */
4912 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
4914 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4915 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4917 base = XEXP (XEXP (addr, 0), 0);
4918 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
4920 else if (GET_CODE (addr) == LO_SUM
4921 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
4922 base = XEXP (addr, 1);
4924 /* If this is a constant pool reference, we can fold it into its
4925 constant to allow better value tracking. */
4926 if (base && GET_CODE (base) == SYMBOL_REF
4927 && CONSTANT_POOL_ADDRESS_P (base))
4929 rtx constant = get_pool_constant (base);
4930 enum machine_mode const_mode = get_pool_mode (base);
4933 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
4934 constant_pool_entries_cost = COST (constant);
4936 /* If we are loading the full constant, we have an equivalence. */
4937 if (offset == 0 && mode == const_mode)
4940 /* If this actually isn't a constant (weird!), we can't do
4941 anything. Otherwise, handle the two most common cases:
4942 extracting a word from a multi-word constant, and extracting
4943 the low-order bits. Other cases don't seem common enough to
4945 if (! CONSTANT_P (constant))
4948 if (GET_MODE_CLASS (mode) == MODE_INT
4949 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4950 && offset % UNITS_PER_WORD == 0
4951 && (new = operand_subword (constant,
4952 offset / UNITS_PER_WORD,
4953 0, const_mode)) != 0)
4956 if (((BYTES_BIG_ENDIAN
4957 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
4958 || (! BYTES_BIG_ENDIAN && offset == 0))
4959 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
4963 /* If this is a reference to a label at a known position in a jump
4964 table, we also know its value. */
4965 if (base && GET_CODE (base) == LABEL_REF)
4967 rtx label = XEXP (base, 0);
4968 rtx table_insn = NEXT_INSN (label);
4970 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
4971 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
4973 rtx table = PATTERN (table_insn);
4976 && (offset / GET_MODE_SIZE (GET_MODE (table))
4977 < XVECLEN (table, 0)))
4978 return XVECEXP (table, 0,
4979 offset / GET_MODE_SIZE (GET_MODE (table)));
4981 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
4982 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
4984 rtx table = PATTERN (table_insn);
4987 && (offset / GET_MODE_SIZE (GET_MODE (table))
4988 < XVECLEN (table, 1)))
4990 offset /= GET_MODE_SIZE (GET_MODE (table));
4991 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
4994 if (GET_MODE (table) != Pmode)
4995 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5009 mode_arg0 = VOIDmode;
5011 /* Try folding our operands.
5012 Then see which ones have constant values known. */
5014 fmt = GET_RTX_FORMAT (code);
5015 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5018 rtx arg = XEXP (x, i);
5019 rtx folded_arg = arg, const_arg = 0;
5020 enum machine_mode mode_arg = GET_MODE (arg);
5021 rtx cheap_arg, expensive_arg;
5022 rtx replacements[2];
5025 /* Most arguments are cheap, so handle them specially. */
5026 switch (GET_CODE (arg))
5029 /* This is the same as calling equiv_constant; it is duplicated
5031 if (REGNO_QTY_VALID_P (REGNO (arg))
5032 && qty_const[reg_qty[REGNO (arg)]] != 0
5033 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5034 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5036 = gen_lowpart_if_possible (GET_MODE (arg),
5037 qty_const[reg_qty[REGNO (arg)]]);
5050 folded_arg = prev_insn_cc0;
5051 mode_arg = prev_insn_cc0_mode;
5052 const_arg = equiv_constant (folded_arg);
5057 folded_arg = fold_rtx (arg, insn);
5058 const_arg = equiv_constant (folded_arg);
5061 /* For the first three operands, see if the operand
5062 is constant or equivalent to a constant. */
5066 folded_arg0 = folded_arg;
5067 const_arg0 = const_arg;
5068 mode_arg0 = mode_arg;
5071 folded_arg1 = folded_arg;
5072 const_arg1 = const_arg;
5075 const_arg2 = const_arg;
5079 /* Pick the least expensive of the folded argument and an
5080 equivalent constant argument. */
5081 if (const_arg == 0 || const_arg == folded_arg
5082 || COST (const_arg) > COST (folded_arg))
5083 cheap_arg = folded_arg, expensive_arg = const_arg;
5085 cheap_arg = const_arg, expensive_arg = folded_arg;
5087 /* Try to replace the operand with the cheapest of the two
5088 possibilities. If it doesn't work and this is either of the first
5089 two operands of a commutative operation, try swapping them.
5090 If THAT fails, try the more expensive, provided it is cheaper
5091 than what is already there. */
5093 if (cheap_arg == XEXP (x, i))
5096 if (insn == 0 && ! copied)
5102 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5104 j < 2 && replacements[j]
5105 && COST (replacements[j]) < COST (XEXP (x, i));
5108 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5111 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5113 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5114 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5116 if (apply_change_group ())
5118 /* Swap them back to be invalid so that this loop can
5119 continue and flag them to be swapped back later. */
5122 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5131 else if (fmt[i] == 'E')
5132 /* Don't try to fold inside of a vector of expressions.
5133 Doing nothing is harmless. */
5136 /* If a commutative operation, place a constant integer as the second
5137 operand unless the first operand is also a constant integer. Otherwise,
5138 place any constant second unless the first operand is also a constant. */
5140 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5142 if (must_swap || (const_arg0
5144 || (GET_CODE (const_arg0) == CONST_INT
5145 && GET_CODE (const_arg1) != CONST_INT))))
5147 register rtx tem = XEXP (x, 0);
5149 if (insn == 0 && ! copied)
5155 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5156 validate_change (insn, &XEXP (x, 1), tem, 1);
5157 if (apply_change_group ())
5159 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5160 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5165 /* If X is an arithmetic operation, see if we can simplify it. */
5167 switch (GET_RTX_CLASS (code))
5170 /* We can't simplify extension ops unless we know the original mode. */
5171 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5172 && mode_arg0 == VOIDmode)
5174 new = simplify_unary_operation (code, mode,
5175 const_arg0 ? const_arg0 : folded_arg0,
5180 /* See what items are actually being compared and set FOLDED_ARG[01]
5181 to those values and CODE to the actual comparison code. If any are
5182 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5183 do anything if both operands are already known to be constant. */
5185 if (const_arg0 == 0 || const_arg1 == 0)
5187 struct table_elt *p0, *p1;
5188 rtx true = const_true_rtx, false = const0_rtx;
5189 enum machine_mode mode_arg1;
5191 #ifdef FLOAT_STORE_FLAG_VALUE
5192 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5194 true = immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, mode);
5195 false = CONST0_RTX (mode);
5199 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5200 &mode_arg0, &mode_arg1);
5201 const_arg0 = equiv_constant (folded_arg0);
5202 const_arg1 = equiv_constant (folded_arg1);
5204 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5205 what kinds of things are being compared, so we can't do
5206 anything with this comparison. */
5208 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5211 /* If we do not now have two constants being compared, see if we
5212 can nevertheless deduce some things about the comparison. */
5213 if (const_arg0 == 0 || const_arg1 == 0)
5215 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or non-explicit
5216 constant? These aren't zero, but we don't know their sign. */
5217 if (const_arg1 == const0_rtx
5218 && (NONZERO_BASE_PLUS_P (folded_arg0)
5219 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5221 || GET_CODE (folded_arg0) == SYMBOL_REF
5223 || GET_CODE (folded_arg0) == LABEL_REF
5224 || GET_CODE (folded_arg0) == CONST))
5228 else if (code == NE)
5232 /* See if the two operands are the same. We don't do this
5233 for IEEE floating-point since we can't assume x == x
5234 since x might be a NaN. */
5236 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5237 || ! FLOAT_MODE_P (mode_arg0))
5238 && (folded_arg0 == folded_arg1
5239 || (GET_CODE (folded_arg0) == REG
5240 && GET_CODE (folded_arg1) == REG
5241 && (reg_qty[REGNO (folded_arg0)]
5242 == reg_qty[REGNO (folded_arg1)]))
5243 || ((p0 = lookup (folded_arg0,
5244 (safe_hash (folded_arg0, mode_arg0)
5245 % NBUCKETS), mode_arg0))
5246 && (p1 = lookup (folded_arg1,
5247 (safe_hash (folded_arg1, mode_arg0)
5248 % NBUCKETS), mode_arg0))
5249 && p0->first_same_value == p1->first_same_value)))
5250 return ((code == EQ || code == LE || code == GE
5251 || code == LEU || code == GEU)
5254 /* If FOLDED_ARG0 is a register, see if the comparison we are
5255 doing now is either the same as we did before or the reverse
5256 (we only check the reverse if not floating-point). */
5257 else if (GET_CODE (folded_arg0) == REG)
5259 int qty = reg_qty[REGNO (folded_arg0)];
5261 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5262 && (comparison_dominates_p (qty_comparison_code[qty], code)
5263 || (comparison_dominates_p (qty_comparison_code[qty],
5264 reverse_condition (code))
5265 && ! FLOAT_MODE_P (mode_arg0)))
5266 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5268 && rtx_equal_p (qty_comparison_const[qty],
5270 || (GET_CODE (folded_arg1) == REG
5271 && (reg_qty[REGNO (folded_arg1)]
5272 == qty_comparison_qty[qty]))))
5273 return (comparison_dominates_p (qty_comparison_code[qty],
5280 /* If we are comparing against zero, see if the first operand is
5281 equivalent to an IOR with a constant. If so, we may be able to
5282 determine the result of this comparison. */
5284 if (const_arg1 == const0_rtx)
5286 rtx y = lookup_as_function (folded_arg0, IOR);
5290 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5291 && GET_CODE (inner_const) == CONST_INT
5292 && INTVAL (inner_const) != 0)
5294 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5295 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5296 && (INTVAL (inner_const)
5297 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5298 rtx true = const_true_rtx, false = const0_rtx;
5300 #ifdef FLOAT_STORE_FLAG_VALUE
5301 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5303 true = immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, mode);
5304 false = CONST0_RTX (mode);
5326 new = simplify_relational_operation (code, mode_arg0,
5327 const_arg0 ? const_arg0 : folded_arg0,
5328 const_arg1 ? const_arg1 : folded_arg1);
5329 #ifdef FLOAT_STORE_FLAG_VALUE
5330 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5331 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5332 : immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, mode));
5341 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5342 with that LABEL_REF as its second operand. If so, the result is
5343 the first operand of that MINUS. This handles switches with an
5344 ADDR_DIFF_VEC table. */
5345 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5347 rtx y = lookup_as_function (folded_arg0, MINUS);
5349 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5350 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5356 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5357 If so, produce (PLUS Z C2-C). */
5358 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5360 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5361 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5362 return fold_rtx (plus_constant (copy_rtx (y),
5363 -INTVAL (const_arg1)),
5367 /* ... fall through ... */
5370 case SMIN: case SMAX: case UMIN: case UMAX:
5371 case IOR: case AND: case XOR:
5372 case MULT: case DIV: case UDIV:
5373 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5374 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5375 is known to be of similar form, we may be able to replace the
5376 operation with a combined operation. This may eliminate the
5377 intermediate operation if every use is simplified in this way.
5378 Note that the similar optimization done by combine.c only works
5379 if the intermediate operation's result has only one reference. */
5381 if (GET_CODE (folded_arg0) == REG
5382 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5385 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5386 rtx y = lookup_as_function (folded_arg0, code);
5388 enum rtx_code associate_code;
5392 || 0 == (inner_const
5393 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5394 || GET_CODE (inner_const) != CONST_INT
5395 /* If we have compiled a statement like
5396 "if (x == (x & mask1))", and now are looking at
5397 "x & mask2", we will have a case where the first operand
5398 of Y is the same as our first operand. Unless we detect
5399 this case, an infinite loop will result. */
5400 || XEXP (y, 0) == folded_arg0)
5403 /* Don't associate these operations if they are a PLUS with the
5404 same constant and it is a power of two. These might be doable
5405 with a pre- or post-increment. Similarly for two subtracts of
5406 identical powers of two with post decrement. */
5408 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5410 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5411 || exact_log2 (INTVAL (const_arg1)) >= 0
5413 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5414 || exact_log2 (- INTVAL (const_arg1)) >= 0
5419 /* Compute the code used to compose the constants. For example,
5420 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5423 = (code == MULT || code == DIV || code == UDIV ? MULT
5424 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5426 new_const = simplify_binary_operation (associate_code, mode,
5427 const_arg1, inner_const);
5432 /* If we are associating shift operations, don't let this
5433 produce a shift of the size of the object or larger.
5434 This could occur when we follow a sign-extend by a right
5435 shift on a machine that does a sign-extend as a pair
5438 if (is_shift && GET_CODE (new_const) == CONST_INT
5439 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5441 /* As an exception, we can turn an ASHIFTRT of this
5442 form into a shift of the number of bits - 1. */
5443 if (code == ASHIFTRT)
5444 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5449 y = copy_rtx (XEXP (y, 0));
5451 /* If Y contains our first operand (the most common way this
5452 can happen is if Y is a MEM), we would go into an infinite
5453 loop if we tried to fold it. So don't in that case. */
5455 if (! reg_mentioned_p (folded_arg0, y))
5456 y = fold_rtx (y, insn);
5458 return cse_gen_binary (code, mode, y, new_const);
5462 new = simplify_binary_operation (code, mode,
5463 const_arg0 ? const_arg0 : folded_arg0,
5464 const_arg1 ? const_arg1 : folded_arg1);
5468 /* (lo_sum (high X) X) is simply X. */
5469 if (code == LO_SUM && const_arg0 != 0
5470 && GET_CODE (const_arg0) == HIGH
5471 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5477 new = simplify_ternary_operation (code, mode, mode_arg0,
5478 const_arg0 ? const_arg0 : folded_arg0,
5479 const_arg1 ? const_arg1 : folded_arg1,
5480 const_arg2 ? const_arg2 : XEXP (x, 2));
5484 return new ? new : x;
5487 /* Return a constant value currently equivalent to X.
5488 Return 0 if we don't know one. */
/* A register may have a constant equivalence recorded for its quantity
   number; if so, substitute that constant, narrowed to X's mode.  */
5494 if (GET_CODE (x) == REG
5495 && REGNO_QTY_VALID_P (REGNO (x))
5496 && qty_const[reg_qty[REGNO (x)]])
5497 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
/* NOTE(review): the statement controlled by this test is elided in this
   listing; presumably it returns X itself -- confirm against full source.  */
5499 if (x != 0 && CONSTANT_P (x))
5502 /* If X is a MEM, try to fold it outside the context of any insn to see if
5503 it might be equivalent to a constant. That handles the case where it
5504 is a constant-pool reference. Then try to look it up in the hash table
5505 in case it is something whose value we have seen before. */
5507 if (GET_CODE (x) == MEM)
5509 struct table_elt *elt;
5511 x = fold_rtx (x, NULL_RTX);
5515 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
/* Walk the whole equivalence class looking for a member that is a
   constant expression.  */
5519 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5520 if (elt->is_const && CONSTANT_P (elt->exp))
5527 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5528 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5529 least-significant part of X.
5530 MODE specifies how big a part of X to return.
5532 If the requested operation cannot be done, 0 is returned.
5534 This is similar to gen_lowpart in emit-rtl.c. */
5537 gen_lowpart_if_possible (mode, x)
5538 enum machine_mode mode;
/* First let the common routine try; only if it fails do we handle the
   MEM case ourselves below.  */
5541 rtx result = gen_lowpart_common (mode, x);
5545 else if (GET_CODE (x) == MEM)
5547 /* This is the only other case we handle. */
5548 register int offset = 0;
/* On big-endian targets the low-order part lives at the high end of the
   object, so compute a byte offset into the MEM accordingly.  */
5551 #if WORDS_BIG_ENDIAN
5552 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5553 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5555 #if BYTES_BIG_ENDIAN
5556 /* Adjust the address so that the address-after-the-data
5558 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5559 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
/* Build the narrower MEM at the adjusted address; it is only usable if
   that address is valid in MODE.  Copy X's volatility, unchanging and
   in-struct flags onto the new reference.  */
5561 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5562 if (! memory_address_p (mode, XEXP (new, 0)))
5564 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5565 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5566 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5573 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5574 branch. It will be zero if not.
5576 In certain cases, this can cause us to add an equivalence. For example,
5577 if we are following the taken case of
5579 we can add the fact that `i' and '2' are now equivalent.
5581 In any case, we can record that this comparison was passed. If the same
5582 comparison is seen later, we will know its value. */
5585 record_jump_equiv (insn, taken)
5589 int cond_known_true;
5591 enum machine_mode mode, mode0, mode1;
5592 int reversed_nonequality = 0;
5595 /* Ensure this is the right kind of insn. */
5596 if (! condjump_p (insn) || simplejump_p (insn))
5599 /* See if this jump condition is known true or false. */
/* NOTE(review): the branch selecting between the next two tests is
   elided in this listing; presumably it depends on TAKEN -- confirm
   against the full source.  Each test checks which arm of the
   IF_THEN_ELSE in the jump's SET_SRC is pc_rtx (the fall-through).  */
5601 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5603 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
5605 /* Get the type of comparison being done and the operands being compared.
5606 If we had to reverse a non-equality condition, record that fact so we
5607 know that it isn't valid for floating-point. */
5608 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5609 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5610 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5612 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
/* If the condition is known false on this path, the reversed condition
   is what actually holds; a reversed non-equality is flagged because it
   is not valid for floating point.  */
5613 if (! cond_known_true)
5615 reversed_nonequality = (code != EQ && code != NE);
5616 code = reverse_condition (code);
5619 /* The mode is the mode of the non-constant. */
5621 if (mode1 != VOIDmode)
5624 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5627 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5628 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5629 Make any useful entries we can with that information. Called from
5630 above function and called recursively. */
5633 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5635 enum machine_mode mode;
5637 int reversed_nonequality;
5639 int op0_hash_code, op1_hash_code;
5640 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5641 struct table_elt *op0_elt, *op1_elt;
5643 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5644 we know that they are also equal in the smaller mode (this is also
5645 true for all smaller modes whether or not there is a SUBREG, but
5646 is not worth testing for with no SUBREG. */
5648 /* Note that GET_MODE (op0) may not equal MODE. */
5649 if (code == EQ && GET_CODE (op0) == SUBREG
5650 && (GET_MODE_SIZE (GET_MODE (op0))
5651 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5653 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5654 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5656 record_jump_cond (code, mode, SUBREG_REG (op0),
5657 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5658 reversed_nonequality);
/* Same paradoxical-SUBREG transformation with OP0 and OP1 exchanged.  */
5661 if (code == EQ && GET_CODE (op1) == SUBREG
5662 && (GET_MODE_SIZE (GET_MODE (op1))
5663 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5665 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5666 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5668 record_jump_cond (code, mode, SUBREG_REG (op1),
5669 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5670 reversed_nonequality);
5673 /* Similarly, if this is an NE comparison, and either is a SUBREG
5674 making a smaller mode, we know the whole thing is also NE. */
5676 /* Note that GET_MODE (op0) may not equal MODE;
5677 if we test MODE instead, we can get an infinite recursion
5678 alternating between two modes each wider than MODE. */
5680 if (code == NE && GET_CODE (op0) == SUBREG
5681 && subreg_lowpart_p (op0)
5682 && (GET_MODE_SIZE (GET_MODE (op0))
5683 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5685 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5686 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5688 record_jump_cond (code, mode, SUBREG_REG (op0),
5689 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5690 reversed_nonequality);
/* And the NE/narrowing-SUBREG transformation with operands exchanged.  */
5693 if (code == NE && GET_CODE (op1) == SUBREG
5694 && subreg_lowpart_p (op1)
5695 && (GET_MODE_SIZE (GET_MODE (op1))
5696 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5698 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5699 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5701 record_jump_cond (code, mode, SUBREG_REG (op1),
5702 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5703 reversed_nonequality);
5706 /* Hash both operands. */
/* HASH sets the globals hash_arg_in_memory and hash_arg_in_struct as
   side effects; clear them before and capture them after each call.  */
5709 hash_arg_in_memory = 0;
5710 hash_arg_in_struct = 0;
5711 op0_hash_code = HASH (op0, mode);
5712 op0_in_memory = hash_arg_in_memory;
5713 op0_in_struct = hash_arg_in_struct;
5719 hash_arg_in_memory = 0;
5720 hash_arg_in_struct = 0;
5721 op1_hash_code = HASH (op1, mode);
5722 op1_in_memory = hash_arg_in_memory;
5723 op1_in_struct = hash_arg_in_struct;
5728 /* Look up both operands. */
5729 op0_elt = lookup (op0, op0_hash_code, mode);
5730 op1_elt = lookup (op1, op1_hash_code, mode);
5732 /* If we aren't setting two things equal all we can do is save this
5733 comparison. Similarly if this is floating-point. In the latter
5734 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
5735 If we record the equality, we might inadvertently delete code
5736 whose intent was to change -0 to +0. */
5738 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
5740 /* If we reversed a floating-point comparison, if OP0 is not a
5741 register, or if OP1 is neither a register nor a constant, we can't
5744 if (GET_CODE (op1) != REG)
5745 op1 = equiv_constant (op1);
5747 if ((reversed_nonequality && FLOAT_MODE_P (mode))
5748 || GET_CODE (op0) != REG || op1 == 0)
5751 /* Put OP0 in the hash table if it isn't already. This gives it a
5752 new quantity number. */
5755 if (insert_regs (op0, NULL_PTR, 0))
5757 rehash_using_reg (op0);
5758 op0_hash_code = HASH (op0, mode);
5760 /* If OP0 is contained in OP1, this changes its hash code
5761 as well. Faster to rehash than to check, except
5762 for the simple case of a constant. */
5763 if (! CONSTANT_P (op1))
5764 op1_hash_code = HASH (op1,mode);
5767 op0_elt = insert (op0, NULL_PTR, op0_hash_code, mode);
5768 op0_elt->in_memory = op0_in_memory;
5769 op0_elt->in_struct = op0_in_struct;
/* Record the comparison code keyed by OP0's quantity so a later
   comparison against the same register can be resolved (see the
   qty_comparison_* uses in fold_rtx above).  */
5772 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
5773 if (GET_CODE (op1) == REG)
5775 /* Look it up again--in case op0 and op1 are the same. */
5776 op1_elt = lookup (op1, op1_hash_code, mode);
5778 /* Put OP1 in the hash table so it gets a new quantity number. */
5781 if (insert_regs (op1, NULL_PTR, 0))
5783 rehash_using_reg (op1);
5784 op1_hash_code = HASH (op1, mode);
5787 op1_elt = insert (op1, NULL_PTR, op1_hash_code, mode);
5788 op1_elt->in_memory = op1_in_memory;
5789 op1_elt->in_struct = op1_in_struct;
/* OP1 is a register: link OP0's quantity to OP1's quantity.  */
5792 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
5793 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
/* OP1 is a constant: record it; -1 presumably marks "no quantity"
   (it can never match a valid reg_qty value).  */
5797 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
5798 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
5804 /* If either side is still missing an equivalence, make it now,
5805 then merge the equivalences. */
5809 if (insert_regs (op0, NULL_PTR, 0))
5811 rehash_using_reg (op0);
5812 op0_hash_code = HASH (op0, mode);
5815 op0_elt = insert (op0, NULL_PTR, op0_hash_code, mode);
5816 op0_elt->in_memory = op0_in_memory;
5817 op0_elt->in_struct = op0_in_struct;
5822 if (insert_regs (op1, NULL_PTR, 0))
5824 rehash_using_reg (op1);
5825 op1_hash_code = HASH (op1, mode);
5828 op1_elt = insert (op1, NULL_PTR, op1_hash_code, mode);
5829 op1_elt->in_memory = op1_in_memory;
5830 op1_elt->in_struct = op1_in_struct;
/* Both operands now have table entries; unite their classes and
   remember the class for use after the jump is processed.  */
5833 merge_equiv_classes (op0_elt, op1_elt);
5834 last_jump_equiv_class = op0_elt;
5837 /* CSE processing for one instruction.
5838 First simplify sources and addresses of all assignments
5839 in the instruction, using previously-computed equivalents values.
5840 Then install the new sources and destinations in the table
5841 of available values.
5843 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
5846 /* Data on one SET contained in the instruction. */
/* Scratch record kept by cse_insn for each SET in the insn being
   processed (see the `sets' array allocated below); it caches hash
   codes, constant equivalents and hash-table entries so the SET can be
   re-examined without recomputing them.  */
5850 /* The SET rtx itself. */
5852 /* The SET_SRC of the rtx (the original value, if it is changing). */
5854 /* The hash-table element for the SET_SRC of the SET. */
5855 struct table_elt *src_elt;
5856 /* Hash code for the SET_SRC. */
5858 /* Hash code for the SET_DEST. */
5860 /* The SET_DEST, with SUBREG, etc., stripped. */
5862 /* Place where the pointer to the INNER_DEST was found. */
5863 rtx *inner_dest_loc;
5864 /* Nonzero if the SET_SRC is in memory. */
5866 /* Nonzero if the SET_SRC is in a structure. */
5868 /* Nonzero if the SET_SRC contains something
5869 whose value cannot be predicted and understood. */
5871 /* Original machine mode, in case it becomes a CONST_INT. */
5872 enum machine_mode mode;
5873 /* A constant equivalent for SET_SRC, if any. */
5875 /* Hash code of constant equivalent for SET_SRC. */
5876 int src_const_hash_code;
5877 /* Table entry for constant equivalent for SET_SRC, if any. */
5878 struct table_elt *src_const_elt;
5882 cse_insn (insn, in_libcall_block)
5884 int in_libcall_block;
5886 register rtx x = PATTERN (insn);
5889 register int n_sets = 0;
5891 /* Records what this insn does to set CC0. */
5892 rtx this_insn_cc0 = 0;
5893 enum machine_mode this_insn_cc0_mode;
5894 struct write_data writes_memory;
5895 static struct write_data init = {0, 0, 0, 0};
5898 struct table_elt *src_eqv_elt = 0;
5899 int src_eqv_volatile;
5900 int src_eqv_in_memory;
5901 int src_eqv_in_struct;
5902 int src_eqv_hash_code;
5907 writes_memory = init;
5909 /* Find all the SETs and CLOBBERs in this instruction.
5910 Record all the SETs in the array `set' and count them.
5911 Also determine whether there is a CLOBBER that invalidates
5912 all memory references, or all references at varying addresses. */
5914 if (GET_CODE (x) == SET)
5916 sets = (struct set *) alloca (sizeof (struct set));
5919 /* Ignore SETs that are unconditional jumps.
5920 They never need cse processing, so this does not hurt.
5921 The reason is not efficiency but rather
5922 so that we can test at the end for instructions
5923 that have been simplified to unconditional jumps
5924 and not be misled by unchanged instructions
5925 that were unconditional jumps to begin with. */
5926 if (SET_DEST (x) == pc_rtx
5927 && GET_CODE (SET_SRC (x)) == LABEL_REF)
5930 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
5931 The hard function value register is used only once, to copy to
5932 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
5933 Ensure we invalidate the destination register. On the 80386 no
5934 other code would invalidate it since it is a fixed_reg.
5935 We need not check the return of apply_change_group; see canon_reg. */
5937 else if (GET_CODE (SET_SRC (x)) == CALL)
5939 canon_reg (SET_SRC (x), insn);
5940 apply_change_group ();
5941 fold_rtx (SET_SRC (x), insn);
5942 invalidate (SET_DEST (x));
5947 else if (GET_CODE (x) == PARALLEL)
5949 register int lim = XVECLEN (x, 0);
5951 sets = (struct set *) alloca (lim * sizeof (struct set));
5953 /* Find all regs explicitly clobbered in this insn,
5954 and ensure they are not replaced with any other regs
5955 elsewhere in this insn.
5956 When a reg that is clobbered is also used for input,
5957 we should presume that that is for a reason,
5958 and we should not substitute some other register
5959 which is not supposed to be clobbered.
5960 Therefore, this loop cannot be merged into the one below
5961 because a CALL may precede a CLOBBER and refer to the
5962 value clobbered. We must not let a canonicalization do
5963 anything in that case. */
5964 for (i = 0; i < lim; i++)
5966 register rtx y = XVECEXP (x, 0, i);
5967 if (GET_CODE (y) == CLOBBER)
5969 rtx clobbered = XEXP (y, 0);
5971 if (GET_CODE (clobbered) == REG
5972 || GET_CODE (clobbered) == SUBREG)
5973 invalidate (clobbered);
5974 else if (GET_CODE (clobbered) == STRICT_LOW_PART
5975 || GET_CODE (clobbered) == ZERO_EXTRACT)
5976 invalidate (XEXP (clobbered, 0));
5980 for (i = 0; i < lim; i++)
5982 register rtx y = XVECEXP (x, 0, i);
5983 if (GET_CODE (y) == SET)
5985 /* As above, we ignore unconditional jumps and call-insns and
5986 ignore the result of apply_change_group. */
5987 if (GET_CODE (SET_SRC (y)) == CALL)
5989 canon_reg (SET_SRC (y), insn);
5990 apply_change_group ();
5991 fold_rtx (SET_SRC (y), insn);
5992 invalidate (SET_DEST (y));
5994 else if (SET_DEST (y) == pc_rtx
5995 && GET_CODE (SET_SRC (y)) == LABEL_REF)
5998 sets[n_sets++].rtl = y;
6000 else if (GET_CODE (y) == CLOBBER)
6002 /* If we clobber memory, take note of that,
6003 and canon the address.
6004 This does nothing when a register is clobbered
6005 because we have already invalidated the reg. */
6006 if (GET_CODE (XEXP (y, 0)) == MEM)
6008 canon_reg (XEXP (y, 0), NULL_RTX);
6009 note_mem_written (XEXP (y, 0), &writes_memory);
6012 else if (GET_CODE (y) == USE
6013 && ! (GET_CODE (XEXP (y, 0)) == REG
6014 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6015 canon_reg (y, NULL_RTX);
6016 else if (GET_CODE (y) == CALL)
6018 /* The result of apply_change_group can be ignored; see
6020 canon_reg (y, insn);
6021 apply_change_group ();
6026 else if (GET_CODE (x) == CLOBBER)
6028 if (GET_CODE (XEXP (x, 0)) == MEM)
6030 canon_reg (XEXP (x, 0), NULL_RTX);
6031 note_mem_written (XEXP (x, 0), &writes_memory);
6035 /* Canonicalize a USE of a pseudo register or memory location. */
6036 else if (GET_CODE (x) == USE
6037 && ! (GET_CODE (XEXP (x, 0)) == REG
6038 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6039 canon_reg (XEXP (x, 0), NULL_RTX);
6040 else if (GET_CODE (x) == CALL)
6042 /* The result of apply_change_group can be ignored; see canon_reg. */
6043 canon_reg (x, insn);
6044 apply_change_group ();
6048 if (n_sets == 1 && REG_NOTES (insn) != 0)
6050 /* Store the equivalent value in SRC_EQV, if different. */
6051 rtx tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6053 if (tem && ! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl)))
6054 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6057 /* Canonicalize sources and addresses of destinations.
6058 We do this in a separate pass to avoid problems when a MATCH_DUP is
6059 present in the insn pattern. In that case, we want to ensure that
6060 we don't break the duplicate nature of the pattern. So we will replace
6061 both operands at the same time. Otherwise, we would fail to find an
6062 equivalent substitution in the loop calling validate_change below.
6064 We used to suppress canonicalization of DEST if it appears in SRC,
6065 but we don't do this any more. */
6067 for (i = 0; i < n_sets; i++)
6069 rtx dest = SET_DEST (sets[i].rtl);
6070 rtx src = SET_SRC (sets[i].rtl);
6071 rtx new = canon_reg (src, insn);
6073 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6074 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6075 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6076 || insn_n_dups[recog_memoized (insn)] > 0)
6077 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6079 SET_SRC (sets[i].rtl) = new;
6081 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6083 validate_change (insn, &XEXP (dest, 1),
6084 canon_reg (XEXP (dest, 1), insn), 1);
6085 validate_change (insn, &XEXP (dest, 2),
6086 canon_reg (XEXP (dest, 2), insn), 1);
6089 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6090 || GET_CODE (dest) == ZERO_EXTRACT
6091 || GET_CODE (dest) == SIGN_EXTRACT)
6092 dest = XEXP (dest, 0);
6094 if (GET_CODE (dest) == MEM)
6095 canon_reg (dest, insn);
6098 /* Now that we have done all the replacements, we can apply the change
6099 group and see if they all work. Note that this will cause some
6100 canonicalizations that would have worked individually not to be applied
6101 because some other canonicalization didn't work, but this should not
6104 The result of apply_change_group can be ignored; see canon_reg. */
6106 apply_change_group ();
6108 /* Set sets[i].src_elt to the class each source belongs to.
6109 Detect assignments from or to volatile things
6110 and set set[i] to zero so they will be ignored
6111 in the rest of this function.
6113 Nothing in this loop changes the hash table or the register chains. */
6115 for (i = 0; i < n_sets; i++)
6117 register rtx src, dest;
6118 register rtx src_folded;
6119 register struct table_elt *elt = 0, *p;
6120 enum machine_mode mode;
6123 rtx src_related = 0;
6124 struct table_elt *src_const_elt = 0;
6125 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6126 int src_related_cost = 10000, src_elt_cost = 10000;
6127 /* Set non-zero if we need to call force_const_mem with the
6128 contents of src_folded before using it. */
6129 int src_folded_force_flag = 0;
6131 dest = SET_DEST (sets[i].rtl);
6132 src = SET_SRC (sets[i].rtl);
6134 /* If SRC is a constant that has no machine mode,
6135 hash it with the destination's machine mode.
6136 This way we can keep different modes separate. */
6138 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6139 sets[i].mode = mode;
6143 enum machine_mode eqvmode = mode;
6144 if (GET_CODE (dest) == STRICT_LOW_PART)
6145 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6147 hash_arg_in_memory = 0;
6148 hash_arg_in_struct = 0;
6149 src_eqv = fold_rtx (src_eqv, insn);
6150 src_eqv_hash_code = HASH (src_eqv, eqvmode);
6152 /* Find the equivalence class for the equivalent expression. */
6155 src_eqv_elt = lookup (src_eqv, src_eqv_hash_code, eqvmode);
6157 src_eqv_volatile = do_not_record;
6158 src_eqv_in_memory = hash_arg_in_memory;
6159 src_eqv_in_struct = hash_arg_in_struct;
6162 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6163 value of the INNER register, not the destination. So it is not
6164 a legal substitution for the source. But save it for later. */
6165 if (GET_CODE (dest) == STRICT_LOW_PART)
6168 src_eqv_here = src_eqv;
6170 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6171 simplified result, which may not necessarily be valid. */
6172 src_folded = fold_rtx (src, insn);
6174 /* If storing a constant in a bitfield, pre-truncate the constant
6175 so we will be able to record it later. */
6176 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6177 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6179 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6181 if (GET_CODE (src) == CONST_INT
6182 && GET_CODE (width) == CONST_INT
6183 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6184 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6186 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6187 << INTVAL (width)) - 1));
6190 /* Compute SRC's hash code, and also notice if it
6191 should not be recorded at all. In that case,
6192 prevent any further processing of this assignment. */
6194 hash_arg_in_memory = 0;
6195 hash_arg_in_struct = 0;
6198 sets[i].src_hash_code = HASH (src, mode);
6199 sets[i].src_volatile = do_not_record;
6200 sets[i].src_in_memory = hash_arg_in_memory;
6201 sets[i].src_in_struct = hash_arg_in_struct;
6204 /* It is no longer clear why we used to do this, but it doesn't
6205 appear to still be needed. So let's try without it since this
6206 code hurts cse'ing widened ops. */
6207 /* If source is a perverse subreg (such as QI treated as an SI),
6208 treat it as volatile. It may do the work of an SI in one context
6209 where the extra bits are not being used, but cannot replace an SI
6211 if (GET_CODE (src) == SUBREG
6212 && (GET_MODE_SIZE (GET_MODE (src))
6213 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6214 sets[i].src_volatile = 1;
6217 /* Locate all possible equivalent forms for SRC. Try to replace
6218 SRC in the insn with each cheaper equivalent.
6220 We have the following types of equivalents: SRC itself, a folded
6221 version, a value given in a REG_EQUAL note, or a value related
6224 Each of these equivalents may be part of an additional class
6225 of equivalents (if more than one is in the table, they must be in
6226 the same class; we check for this).
6228 If the source is volatile, we don't do any table lookups.
6230 We note any constant equivalent for possible later use in a
6233 if (!sets[i].src_volatile)
6234 elt = lookup (src, sets[i].src_hash_code, mode);
6236 sets[i].src_elt = elt;
6238 if (elt && src_eqv_here && src_eqv_elt)
6240 if (elt->first_same_value != src_eqv_elt->first_same_value)
6242 /* The REG_EQUAL is indicating that two formerly distinct
6243 classes are now equivalent. So merge them. */
6244 merge_equiv_classes (elt, src_eqv_elt);
6245 src_eqv_hash_code = HASH (src_eqv, elt->mode);
6246 src_eqv_elt = lookup (src_eqv, src_eqv_hash_code, elt->mode);
6252 else if (src_eqv_elt)
6255 /* Try to find a constant somewhere and record it in `src_const'.
6256 Record its table element, if any, in `src_const_elt'. Look in
6257 any known equivalences first. (If the constant is not in the
6258 table, also set `sets[i].src_const_hash_code'). */
6260 for (p = elt->first_same_value; p; p = p->next_same_value)
6264 src_const_elt = elt;
6269 && (CONSTANT_P (src_folded)
6270 /* Consider (minus (label_ref L1) (label_ref L2)) as
6271 "constant" here so we will record it. This allows us
6272 to fold switch statements when an ADDR_DIFF_VEC is used. */
6273 || (GET_CODE (src_folded) == MINUS
6274 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6275 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6276 src_const = src_folded, src_const_elt = elt;
6277 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6278 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6280 /* If we don't know if the constant is in the table, get its
6281 hash code and look it up. */
6282 if (src_const && src_const_elt == 0)
6284 sets[i].src_const_hash_code = HASH (src_const, mode);
6285 src_const_elt = lookup (src_const, sets[i].src_const_hash_code,
6289 sets[i].src_const = src_const;
6290 sets[i].src_const_elt = src_const_elt;
6292 /* If the constant and our source are both in the table, mark them as
6293 equivalent. Otherwise, if a constant is in the table but the source
6294 isn't, set ELT to it. */
6295 if (src_const_elt && elt
6296 && src_const_elt->first_same_value != elt->first_same_value)
6297 merge_equiv_classes (elt, src_const_elt);
6298 else if (src_const_elt && elt == 0)
6299 elt = src_const_elt;
6301 /* See if there is a register linearly related to a constant
6302 equivalent of SRC. */
6304 && (GET_CODE (src_const) == CONST
6305 || (src_const_elt && src_const_elt->related_value != 0)))
6307 src_related = use_related_value (src_const, src_const_elt);
6310 struct table_elt *src_related_elt
6311 = lookup (src_related, HASH (src_related, mode), mode);
6312 if (src_related_elt && elt)
6314 if (elt->first_same_value
6315 != src_related_elt->first_same_value)
6316 /* This can occur when we previously saw a CONST
6317 involving a SYMBOL_REF and then see the SYMBOL_REF
6318 twice. Merge the involved classes. */
6319 merge_equiv_classes (elt, src_related_elt);
6322 src_related_elt = 0;
6324 else if (src_related_elt && elt == 0)
6325 elt = src_related_elt;
6329 /* See if we have a CONST_INT that is already in a register in a
6332 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6333 && GET_MODE_CLASS (mode) == MODE_INT
6334 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6336 enum machine_mode wider_mode;
6338 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6339 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6340 && src_related == 0;
6341 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6343 struct table_elt *const_elt
6344 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6349 for (const_elt = const_elt->first_same_value;
6350 const_elt; const_elt = const_elt->next_same_value)
6351 if (GET_CODE (const_elt->exp) == REG)
6353 src_related = gen_lowpart_if_possible (mode,
6360 /* Another possibility is that we have an AND with a constant in
6361 a mode narrower than a word. If so, it might have been generated
6362 as part of an "if" which would narrow the AND. If we already
6363 have done the AND in a wider mode, we can use a SUBREG of that
6366 if (flag_expensive_optimizations && ! src_related
6367 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6368 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6370 enum machine_mode tmode;
6371 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6373 for (tmode = GET_MODE_WIDER_MODE (mode);
6374 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6375 tmode = GET_MODE_WIDER_MODE (tmode))
6377 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6378 struct table_elt *larger_elt;
6382 PUT_MODE (new_and, tmode);
6383 XEXP (new_and, 0) = inner;
6384 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6385 if (larger_elt == 0)
6388 for (larger_elt = larger_elt->first_same_value;
6389 larger_elt; larger_elt = larger_elt->next_same_value)
6390 if (GET_CODE (larger_elt->exp) == REG)
6393 = gen_lowpart_if_possible (mode, larger_elt->exp);
6403 if (src == src_folded)
6406 /* At this point, ELT, if non-zero, points to a class of expressions
6407 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6408 and SRC_RELATED, if non-zero, each contain additional equivalent
6409 expressions. Prune these latter expressions by deleting expressions
6410 already in the equivalence class.
6412 Check for an equivalent identical to the destination. If found,
6413 this is the preferred equivalent since it will likely lead to
6414 elimination of the insn. Indicate this by placing it in
6417 if (elt) elt = elt->first_same_value;
6418 for (p = elt; p; p = p->next_same_value)
6420 enum rtx_code code = GET_CODE (p->exp);
6422 /* If the expression is not valid, ignore it. Then we do not
6423 have to check for validity below. In most cases, we can use
6424 `rtx_equal_p', since canonicalization has already been done. */
6425 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6428 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6430 else if (src_folded && GET_CODE (src_folded) == code
6431 && rtx_equal_p (src_folded, p->exp))
6433 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6434 && rtx_equal_p (src_eqv_here, p->exp))
6436 else if (src_related && GET_CODE (src_related) == code
6437 && rtx_equal_p (src_related, p->exp))
6440 /* This is the same as the destination of the insn; we want
6441 to prefer it. Copy it to src_related. The code below will
6442 then give it a negative cost. */
6443 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6448 /* Find the cheapest valid equivalent, trying all the available
6449 possibilities. Prefer items not in the hash table to ones
6450 that are when they are equal cost. Note that we can never
6451 worsen an insn as the current contents will also succeed.
6452 If we find an equivalent identical to the destination, use it as best,
6453 since this insn will probably be eliminated in that case. */
6456 if (rtx_equal_p (src, dest))
6459 src_cost = COST (src);
6464 if (rtx_equal_p (src_eqv_here, dest))
6467 src_eqv_cost = COST (src_eqv_here);
6472 if (rtx_equal_p (src_folded, dest))
6473 src_folded_cost = -1;
6475 src_folded_cost = COST (src_folded);
6480 if (rtx_equal_p (src_related, dest))
6481 src_related_cost = -1;
6483 src_related_cost = COST (src_related);
6486 /* If this was an indirect jump insn, a known label will really be
6487 cheaper even though it looks more expensive. */
6488 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6489 src_folded = src_const, src_folded_cost = -1;
6491 /* Terminate loop when replacement made. This must terminate since
6492 the current contents will be tested and will always be valid. */
6497 /* Skip invalid entries. */
6498 while (elt && GET_CODE (elt->exp) != REG
6499 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6500 elt = elt->next_same_value;
6502 if (elt) src_elt_cost = elt->cost;
6504 /* Find cheapest and skip it for the next time. For items
6505 of equal cost, use this order:
6506 src_folded, src, src_eqv, src_related and hash table entry. */
6507 if (src_folded_cost <= src_cost
6508 && src_folded_cost <= src_eqv_cost
6509 && src_folded_cost <= src_related_cost
6510 && src_folded_cost <= src_elt_cost)
6512 trial = src_folded, src_folded_cost = 10000;
6513 if (src_folded_force_flag)
6514 trial = force_const_mem (mode, trial);
6516 else if (src_cost <= src_eqv_cost
6517 && src_cost <= src_related_cost
6518 && src_cost <= src_elt_cost)
6519 trial = src, src_cost = 10000;
6520 else if (src_eqv_cost <= src_related_cost
6521 && src_eqv_cost <= src_elt_cost)
6522 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6523 else if (src_related_cost <= src_elt_cost)
6524 trial = copy_rtx (src_related), src_related_cost = 10000;
6527 trial = copy_rtx (elt->exp);
6528 elt = elt->next_same_value;
6529 src_elt_cost = 10000;
6532 /* We don't normally have an insn matching (set (pc) (pc)), so
6533 check for this separately here. We will delete such an
6536 Tablejump insns contain a USE of the table, so simply replacing
6537 the operand with the constant won't match. This is simply an
6538 unconditional branch, however, and is therefore valid. Just
6539 insert the substitution here and we will delete and re-emit
6542 if (n_sets == 1 && dest == pc_rtx
6544 || (GET_CODE (trial) == LABEL_REF
6545 && ! condjump_p (insn))))
6547 /* If TRIAL is a label in front of a jump table, we are
6548 really falling through the switch (this is how casesi
6549 insns work), so we must branch around the table. */
6550 if (GET_CODE (trial) == CODE_LABEL
6551 && NEXT_INSN (trial) != 0
6552 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6553 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6554 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6556 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6558 SET_SRC (sets[i].rtl) = trial;
6559 cse_jumps_altered = 1;
6563 /* Look for a substitution that makes a valid insn. */
6564 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6566 /* The result of apply_change_group can be ignored; see
6569 validate_change (insn, &SET_SRC (sets[i].rtl),
6570 canon_reg (SET_SRC (sets[i].rtl), insn),
6572 apply_change_group ();
6576 /* If we previously found constant pool entries for
6577 constants and this is a constant, try making a
6578 pool entry. Put it in src_folded unless we already have done
6579 this since that is where it likely came from. */
6581 else if (constant_pool_entries_cost
6582 && CONSTANT_P (trial)
6583 && (src_folded == 0 || GET_CODE (src_folded) != MEM)
6584 && GET_MODE_CLASS (mode) != MODE_CC)
6586 src_folded_force_flag = 1;
6588 src_folded_cost = constant_pool_entries_cost;
6592 src = SET_SRC (sets[i].rtl);
6594 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6595 However, there is an important exception: If both are registers
6596 that are not the head of their equivalence class, replace SET_SRC
6597 with the head of the class. If we do not do this, we will have
6598 both registers live over a portion of the basic block. This way,
6599 their lifetimes will likely abut instead of overlapping. */
6600 if (GET_CODE (dest) == REG
6601 && REGNO_QTY_VALID_P (REGNO (dest))
6602 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6603 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6604 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6605 /* Don't do this if the original insn had a hard reg as
6607 && (GET_CODE (sets[i].src) != REG
6608 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6609 /* We can't call canon_reg here because it won't do anything if
6610 SRC is a hard register. */
6612 int first = qty_first_reg[reg_qty[REGNO (src)]];
6614 src = SET_SRC (sets[i].rtl)
6615 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6616 : gen_rtx (REG, GET_MODE (src), first);
6618 /* If we had a constant that is cheaper than what we are now
6619 setting SRC to, use that constant. We ignored it when we
6620 thought we could make this into a no-op. */
6621 if (src_const && COST (src_const) < COST (src)
6622 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6626 /* If we made a change, recompute SRC values. */
6627 if (src != sets[i].src)
6630 hash_arg_in_memory = 0;
6631 hash_arg_in_struct = 0;
6633 sets[i].src_hash_code = HASH (src, mode);
6634 sets[i].src_volatile = do_not_record;
6635 sets[i].src_in_memory = hash_arg_in_memory;
6636 sets[i].src_in_struct = hash_arg_in_struct;
6637 sets[i].src_elt = lookup (src, sets[i].src_hash_code, mode);
6640 /* If this is a single SET, we are setting a register, and we have an
6641 equivalent constant, we want to add a REG_NOTE. We don't want
6642 to write a REG_EQUAL note for a constant pseudo since verifying that
6643 that pseudo hasn't been eliminated is a pain. Such a note also
6644 won't help anything. */
6645 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6646 && GET_CODE (src_const) != REG)
6648 rtx tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6650 /* Record the actual constant value in a REG_EQUAL note, making
6651 a new one if one does not already exist. */
6653 XEXP (tem, 0) = src_const;
6655 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
6656 src_const, REG_NOTES (insn));
6658 /* If storing a constant value in a register that
6659 previously held the constant value 0,
6660 record this fact with a REG_WAS_0 note on this insn.
6662 Note that the *register* is required to have previously held 0,
6663 not just any register in the quantity and we must point to the
6664 insn that set that register to zero.
6666 Rather than track each register individually, we just see if
6667 the last set for this quantity was for this register. */
6669 if (REGNO_QTY_VALID_P (REGNO (dest))
6670 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
6672 /* See if we previously had a REG_WAS_0 note. */
6673 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
6674 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
6676 if ((tem = single_set (const_insn)) != 0
6677 && rtx_equal_p (SET_DEST (tem), dest))
6680 XEXP (note, 0) = const_insn;
6682 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
6683 const_insn, REG_NOTES (insn));
6688 /* Now deal with the destination. */
6690 sets[i].inner_dest_loc = &SET_DEST (sets[0].rtl);
6692 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
6693 to the MEM or REG within it. */
6694 while (GET_CODE (dest) == SIGN_EXTRACT
6695 || GET_CODE (dest) == ZERO_EXTRACT
6696 || GET_CODE (dest) == SUBREG
6697 || GET_CODE (dest) == STRICT_LOW_PART)
6699 sets[i].inner_dest_loc = &XEXP (dest, 0);
6700 dest = XEXP (dest, 0);
6703 sets[i].inner_dest = dest;
6705 if (GET_CODE (dest) == MEM)
6707 dest = fold_rtx (dest, insn);
6709 /* Decide whether we invalidate everything in memory,
6710 or just things at non-fixed places.
6711 Writing a large aggregate must invalidate everything
6712 because we don't know how long it is. */
6713 note_mem_written (dest, &writes_memory);
6716 /* Compute the hash code of the destination now,
6717 before the effects of this instruction are recorded,
6718 since the register values used in the address computation
6719 are those before this instruction. */
6720 sets[i].dest_hash_code = HASH (dest, mode);
6722 /* Don't enter a bit-field in the hash table
6723 because the value in it after the store
6724 may not equal what was stored, due to truncation. */
6726 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6727 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6729 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6731 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
6732 && GET_CODE (width) == CONST_INT
6733 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6734 && ! (INTVAL (src_const)
6735 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6736 /* Exception: if the value is constant,
6737 and it won't be truncated, record it. */
6741 /* This is chosen so that the destination will be invalidated
6742 but no new value will be recorded.
6743 We must invalidate because sometimes constant
6744 values can be recorded for bitfields. */
6745 sets[i].src_elt = 0;
6746 sets[i].src_volatile = 1;
6752 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
6754 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
6756 PUT_CODE (insn, NOTE);
6757 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
6758 NOTE_SOURCE_FILE (insn) = 0;
6759 cse_jumps_altered = 1;
6760 /* One less use of the label this insn used to jump to. */
6761 --LABEL_NUSES (JUMP_LABEL (insn));
6762 /* No more processing for this set. */
6766 /* If this SET is now setting PC to a label, we know it used to
6767 be a conditional or computed branch. So we see if we can follow
6768 it. If it was a computed branch, delete it and re-emit. */
6769 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
6773 /* If this is not in the format for a simple branch and
6774 we are the only SET in it, re-emit it. */
6775 if (! simplejump_p (insn) && n_sets == 1)
6777 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
6778 JUMP_LABEL (new) = XEXP (src, 0);
6779 LABEL_NUSES (XEXP (src, 0))++;
6784 /* Otherwise, force rerecognition, since it probably had
6785 a different pattern before.
6786 This shouldn't really be necessary, since whatever
6787 changed the source value above should have done this.
6788 Until the right place is found, might as well do this here. */
6789 INSN_CODE (insn) = -1;
6791 /* Now that we've converted this jump to an unconditional jump,
6792 there is dead code after it. Delete the dead code until we
6793 reach a BARRIER, the end of the function, or a label. Do
6794 not delete NOTEs except for NOTE_INSN_DELETED since later
6795 phases assume these notes are retained. */
6799 while (NEXT_INSN (p) != 0
6800 && GET_CODE (NEXT_INSN (p)) != BARRIER
6801 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
6803 if (GET_CODE (NEXT_INSN (p)) != NOTE
6804 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
6805 delete_insn (NEXT_INSN (p));
6810 /* If we don't have a BARRIER immediately after INSN, put one there.
6811 Much code assumes that there are no NOTEs between a JUMP_INSN and
6814 if (NEXT_INSN (insn) == 0
6815 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
6816 emit_barrier_after (insn);
6818 /* We might have two BARRIERs separated by notes. Delete the second
6821 if (p != insn && NEXT_INSN (p) != 0
6822 && GET_CODE (NEXT_INSN (p)) == BARRIER)
6823 delete_insn (NEXT_INSN (p));
6825 cse_jumps_altered = 1;
6829 /* If destination is volatile, invalidate it and then do no further
6830 processing for this assignment. */
6832 else if (do_not_record)
6834 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
6835 || GET_CODE (dest) == MEM)
6837 else if (GET_CODE (dest) == STRICT_LOW_PART
6838 || GET_CODE (dest) == ZERO_EXTRACT)
6839 invalidate (XEXP (dest, 0));
6843 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
6844 sets[i].dest_hash_code = HASH (SET_DEST (sets[i].rtl), mode);
6847 /* If setting CC0, record what it was set to, or a constant, if it
6848 is equivalent to a constant. If it is being set to a floating-point
6849 value, make a COMPARE with the appropriate constant of 0. If we
6850 don't do this, later code can interpret this as a test against
6851 const0_rtx, which can cause problems if we try to put it into an
6852 insn as a floating-point operand. */
6853 if (dest == cc0_rtx)
6855 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
6856 this_insn_cc0_mode = mode;
6857 if (FLOAT_MODE_P (mode))
6858 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
6864 /* Now enter all non-volatile source expressions in the hash table
6865 if they are not already present.
6866 Record their equivalence classes in src_elt.
6867 This way we can insert the corresponding destinations into
6868 the same classes even if the actual sources are no longer in them
6869 (having been invalidated). */
6871 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
6872 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
6874 register struct table_elt *elt;
6875 register struct table_elt *classp = sets[0].src_elt;
6876 rtx dest = SET_DEST (sets[0].rtl);
6877 enum machine_mode eqvmode = GET_MODE (dest);
6879 if (GET_CODE (dest) == STRICT_LOW_PART)
6881 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6884 if (insert_regs (src_eqv, classp, 0))
6885 src_eqv_hash_code = HASH (src_eqv, eqvmode);
6886 elt = insert (src_eqv, classp, src_eqv_hash_code, eqvmode);
6887 elt->in_memory = src_eqv_in_memory;
6888 elt->in_struct = src_eqv_in_struct;
6891 /* Check to see if src_eqv_elt is the same as a set source which
6892 does not yet have an elt, and if so set the elt of the set source
6894 for (i = 0; i < n_sets; i++)
6895 if (sets[i].rtl && sets[i].src_elt == 0
6896 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
6897 sets[i].src_elt = src_eqv_elt;
6900 for (i = 0; i < n_sets; i++)
6901 if (sets[i].rtl && ! sets[i].src_volatile
6902 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
6904 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
6906 /* REG_EQUAL in setting a STRICT_LOW_PART
6907 gives an equivalent for the entire destination register,
6908 not just for the subreg being stored in now.
6909 This is a more interesting equivalence, so we arrange later
6910 to treat the entire reg as the destination. */
6911 sets[i].src_elt = src_eqv_elt;
6912 sets[i].src_hash_code = src_eqv_hash_code;
6916 /* Insert source and constant equivalent into hash table, if not
6918 register struct table_elt *classp = src_eqv_elt;
6919 register rtx src = sets[i].src;
6920 register rtx dest = SET_DEST (sets[i].rtl);
6921 enum machine_mode mode
6922 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6924 if (sets[i].src_elt == 0)
6926 register struct table_elt *elt;
6928 /* Note that these insert_regs calls cannot remove
6929 any of the src_elt's, because they would have failed to
6930 match if not still valid. */
6931 if (insert_regs (src, classp, 0))
6932 sets[i].src_hash_code = HASH (src, mode);
6933 elt = insert (src, classp, sets[i].src_hash_code, mode);
6934 elt->in_memory = sets[i].src_in_memory;
6935 elt->in_struct = sets[i].src_in_struct;
6936 sets[i].src_elt = classp = elt;
6939 if (sets[i].src_const && sets[i].src_const_elt == 0
6940 && src != sets[i].src_const
6941 && ! rtx_equal_p (sets[i].src_const, src))
6942 sets[i].src_elt = insert (sets[i].src_const, classp,
6943 sets[i].src_const_hash_code, mode);
6946 else if (sets[i].src_elt == 0)
6947 /* If we did not insert the source into the hash table (e.g., it was
6948 volatile), note the equivalence class for the REG_EQUAL value, if any,
6949 so that the destination goes into that class. */
6950 sets[i].src_elt = src_eqv_elt;
6952 invalidate_from_clobbers (&writes_memory, x);
6954 /* Some registers are invalidated by subroutine calls. Memory is
6955 invalidated by non-constant calls. */
6957 if (GET_CODE (insn) == CALL_INSN)
6959 static struct write_data everything = {0, 1, 1, 1};
6961 if (! CONST_CALL_P (insn))
6962 invalidate_memory (&everything);
6963 invalidate_for_call ();
6966 /* Now invalidate everything set by this instruction.
6967 If a SUBREG or other funny destination is being set,
6968 sets[i].rtl is still nonzero, so here we invalidate the reg
6969 a part of which is being set. */
6971 for (i = 0; i < n_sets; i++)
6974 register rtx dest = sets[i].inner_dest;
6976 /* Needed for registers to remove the register from its
6977 previous quantity's chain.
6978 Needed for memory if this is a nonvarying address, unless
6979 we have just done an invalidate_memory that covers even those. */
6980 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
6981 || (! writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
6983 else if (GET_CODE (dest) == STRICT_LOW_PART
6984 || GET_CODE (dest) == ZERO_EXTRACT)
6985 invalidate (XEXP (dest, 0));
6988 /* Make sure registers mentioned in destinations
6989 are safe for use in an expression to be inserted.
6990 This removes from the hash table
6991 any invalid entry that refers to one of these registers.
6993 We don't care about the return value from mention_regs because
6994 we are going to hash the SET_DEST values unconditionally. */
6996 for (i = 0; i < n_sets; i++)
6997 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
6998 mention_regs (SET_DEST (sets[i].rtl));
7000 /* We may have just removed some of the src_elt's from the hash table.
7001 So replace each one with the current head of the same class. */
7003 for (i = 0; i < n_sets; i++)
7006 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7007 /* If elt was removed, find current head of same class,
7008 or 0 if nothing remains of that class. */
7010 register struct table_elt *elt = sets[i].src_elt;
7012 while (elt && elt->prev_same_value)
7013 elt = elt->prev_same_value;
7015 while (elt && elt->first_same_value == 0)
7016 elt = elt->next_same_value;
7017 sets[i].src_elt = elt ? elt->first_same_value : 0;
7021 /* Now insert the destinations into their equivalence classes. */
7023 for (i = 0; i < n_sets; i++)
7026 register rtx dest = SET_DEST (sets[i].rtl);
7027 register struct table_elt *elt;
7029 /* Don't record value if we are not supposed to risk allocating
7030 floating-point values in registers that might be wider than
7032 if ((flag_float_store
7033 && GET_CODE (dest) == MEM
7034 && FLOAT_MODE_P (GET_MODE (dest)))
7035 /* Don't record values of destinations set inside a libcall block
7036 since we might delete the libcall. Things should have been set
7037 up so we won't want to reuse such a value, but we play it safe
7040 /* If we didn't put a REG_EQUAL value or a source into the hash
7041 table, there is no point is recording DEST. */
7042 || sets[i].src_elt == 0)
7045 /* STRICT_LOW_PART isn't part of the value BEING set,
7046 and neither is the SUBREG inside it.
7047 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7048 if (GET_CODE (dest) == STRICT_LOW_PART)
7049 dest = SUBREG_REG (XEXP (dest, 0));
7051 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7052 /* Registers must also be inserted into chains for quantities. */
7053 if (insert_regs (dest, sets[i].src_elt, 1))
7054 /* If `insert_regs' changes something, the hash code must be
7056 sets[i].dest_hash_code = HASH (dest, GET_MODE (dest));
7058 elt = insert (dest, sets[i].src_elt,
7059 sets[i].dest_hash_code, GET_MODE (dest));
7060 elt->in_memory = GET_CODE (sets[i].inner_dest) == MEM;
7063 /* This implicitly assumes a whole struct
7064 need not have MEM_IN_STRUCT_P.
7065 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7066 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7067 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7070 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7071 narrower than M2, and both M1 and M2 are the same number of words,
7072 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7073 make that equivalence as well.
7075 However, BAR may have equivalences for which gen_lowpart_if_possible
7076 will produce a simpler value than gen_lowpart_if_possible applied to
7077 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7078 BAR's equivalences. If we don't get a simplified form, make
7079 the SUBREG. It will not be used in an equivalence, but will
7080 cause two similar assignments to be detected.
7082 Note the loop below will find SUBREG_REG (DEST) since we have
7083 already entered SRC and DEST of the SET in the table. */
7085 if (GET_CODE (dest) == SUBREG
7086 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) / UNITS_PER_WORD
7087 == GET_MODE_SIZE (GET_MODE (dest)) / UNITS_PER_WORD)
7088 && (GET_MODE_SIZE (GET_MODE (dest))
7089 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7090 && sets[i].src_elt != 0)
7092 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7093 struct table_elt *elt, *classp = 0;
7095 for (elt = sets[i].src_elt->first_same_value; elt;
7096 elt = elt->next_same_value)
7100 struct table_elt *src_elt;
7102 /* Ignore invalid entries. */
7103 if (GET_CODE (elt->exp) != REG
7104 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7107 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7109 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7111 src_hash = HASH (new_src, new_mode);
7112 src_elt = lookup (new_src, src_hash, new_mode);
7114 /* Put the new source in the hash table is if isn't
7118 if (insert_regs (new_src, classp, 0))
7119 src_hash = HASH (new_src, new_mode);
7120 src_elt = insert (new_src, classp, src_hash, new_mode);
7121 src_elt->in_memory = elt->in_memory;
7122 src_elt->in_struct = elt->in_struct;
7124 else if (classp && classp != src_elt->first_same_value)
7125 /* Show that two things that we've seen before are
7126 actually the same. */
7127 merge_equiv_classes (src_elt, classp);
7129 classp = src_elt->first_same_value;
7134 /* Special handling for (set REG0 REG1)
7135 where REG0 is the "cheapest", cheaper than REG1.
7136 After cse, REG1 will probably not be used in the sequel,
7137 so (if easily done) change this insn to (set REG1 REG0) and
7138 replace REG1 with REG0 in the previous insn that computed their value.
7139 Then REG1 will become a dead store and won't cloud the situation
7140 for later optimizations.
7142 Do not make this change if REG1 is a hard register, because it will
7143 then be used in the sequel and we may be changing a two-operand insn
7144 into a three-operand insn.
7146 Also do not do this if we are operating on a copy of INSN. */
7148 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7149 && NEXT_INSN (PREV_INSN (insn)) == insn
7150 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7151 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7152 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7153 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7154 == REGNO (SET_DEST (sets[0].rtl))))
7156 rtx prev = PREV_INSN (insn);
7157 while (prev && GET_CODE (prev) == NOTE)
7158 prev = PREV_INSN (prev);
7160 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7161 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7163 rtx dest = SET_DEST (sets[0].rtl);
7164 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7166 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7167 validate_change (insn, & SET_DEST (sets[0].rtl),
7168 SET_SRC (sets[0].rtl), 1);
7169 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7170 apply_change_group ();
7172 /* If REG1 was equivalent to a constant, REG0 is not. */
7174 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7176 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7177 any REG_WAS_0 note on INSN to PREV. */
7178 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7180 remove_note (prev, note);
7182 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7185 remove_note (insn, note);
7186 XEXP (note, 1) = REG_NOTES (prev);
7187 REG_NOTES (prev) = note;
7192 /* If this is a conditional jump insn, record any known equivalences due to
7193 the condition being tested. */
7195 last_jump_equiv_class = 0;
7196 if (GET_CODE (insn) == JUMP_INSN
7197 && n_sets == 1 && GET_CODE (x) == SET
7198 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7199 record_jump_equiv (insn, 0);
7202 /* If the previous insn set CC0 and this insn no longer references CC0,
7203 delete the previous insn. Here we use the fact that nothing expects CC0
7204 to be valid over an insn, which is true until the final pass. */
7205 if (prev_insn && GET_CODE (prev_insn) == INSN
7206 && (tem = single_set (prev_insn)) != 0
7207 && SET_DEST (tem) == cc0_rtx
7208 && ! reg_mentioned_p (cc0_rtx, x))
7210 PUT_CODE (prev_insn, NOTE);
7211 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7212 NOTE_SOURCE_FILE (prev_insn) = 0;
7215 prev_insn_cc0 = this_insn_cc0;
7216 prev_insn_cc0_mode = this_insn_cc0_mode;
7222 /* Store 1 in *WRITES_PTR for those categories of memory ref
7223 that must be invalidated when the expression WRITTEN is stored in.
7224 If WRITTEN is null, say everything must be invalidated. */
/* NOTE(review): this numbered listing is elided -- lines such as the
   return type, the WRITTEN parameter declaration, and several braces
   are missing between the numbered entries.  */
7227 note_mem_written (written, writes_ptr)
7229 struct write_data *writes_ptr;
/* `everything` marks every category except the first field; the exact
   field meanings depend on struct write_data, which is declared
   elsewhere in this file -- confirm against that declaration.  */
7231 static struct write_data everything = {0, 1, 1, 1};
7234 *writes_ptr = everything;
7235 else if (GET_CODE (written) == MEM)
7237 /* Pushing or popping the stack invalidates just the stack pointer. */
7238 rtx addr = XEXP (written, 0);
7239 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7240 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7241 && GET_CODE (XEXP (addr, 0)) == REG
7242 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
/* A BLKmode store (e.g. a block copy) can overlap anything.  */
7247 else if (GET_MODE (written) == BLKmode)
7248 *writes_ptr = everything;
7249 /* (mem (scratch)) means clobber everything. */
7250 else if (GET_CODE (addr) == SCRATCH)
7251 *writes_ptr = everything;
7252 else if (cse_rtx_addr_varies_p (written))
7254 /* A varying address that is a sum indicates an array element,
7255 and that's just as good as a structure element
7256 in implying that we need not invalidate scalar variables.
7257 However, we must allow QImode aliasing of scalars, because the
7258 ANSI C standard allows character pointers to alias anything. */
7259 if (! ((MEM_IN_STRUCT_P (written)
7260 || GET_CODE (XEXP (written, 0)) == PLUS)
7261 && GET_MODE (written) != QImode))
7262 writes_ptr->all = 1;
7263 writes_ptr->nonscalar = 1;
/* Fixed-address store: only (scalar) variable refs need invalidating.  */
7265 writes_ptr->var = 1;
7269 /* Perform invalidation on the basis of everything about an insn
7270 except for invalidating the actual places that are SET in it.
7271 This includes the places CLOBBERed, and anything that might
7272 alias with something that is SET or CLOBBERed.
7274 W points to the writes_memory for this insn, a struct write_data
7275 saying which kinds of memory references must be invalidated.
7276 X is the pattern of the insn. */
/* NOTE(review): elided listing -- braces and some declarations are
   missing between the numbered lines.  */
7279 invalidate_from_clobbers (w, x)
7280 struct write_data *w;
7283 /* If W->var is not set, W specifies no action.
7284 If W->all is set, this step gets all memory refs
7285 so they can be ignored in the rest of this function. */
7287 invalidate_memory (w);
/* A stack push/pop changes the stack pointer's value: bump its tick
   so stale equivalences involving it are recognized as invalid.  */
7291 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7292 reg_tick[STACK_POINTER_REGNUM]++;
7294 /* This should be *very* rare. */
7295 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7296 invalidate (stack_pointer_rtx);
/* Single CLOBBER at the top level of the pattern.  */
7299 if (GET_CODE (x) == CLOBBER)
7301 rtx ref = XEXP (x, 0);
7304 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7305 || (GET_CODE (ref) == MEM && ! w->all))
7307 else if (GET_CODE (ref) == STRICT_LOW_PART
7308 || GET_CODE (ref) == ZERO_EXTRACT)
7309 invalidate (XEXP (ref, 0));
/* PARALLEL: scan every element for CLOBBERs (same logic as above).  */
7312 else if (GET_CODE (x) == PARALLEL)
7315 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7317 register rtx y = XVECEXP (x, 0, i);
7318 if (GET_CODE (y) == CLOBBER)
7320 rtx ref = XEXP (y, 0);
7323 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7324 || (GET_CODE (ref) == MEM && !w->all))
7326 else if (GET_CODE (ref) == STRICT_LOW_PART
7327 || GET_CODE (ref) == ZERO_EXTRACT)
7328 invalidate (XEXP (ref, 0));
7335 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7336 and replace any registers in them with either an equivalent constant
7337 or the canonical form of the register. If we are inside an address,
7338 only do this if the address remains valid.
7340 OBJECT is 0 except when within a MEM in which case it is the MEM.
7342 Return the replacement for X. */
/* NOTE(review): elided listing -- the switch statement and several case
   labels around these lines are missing from this view.  */
7345 cse_process_notes (x, object)
7349 enum rtx_code code = GET_CODE (x);
7350 char *fmt = GET_RTX_FORMAT (code);
/* Recurse into a MEM's address, passing the MEM as OBJECT so changes
   are validated against address legitimacy.  */
7366 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
/* Walk the note chain: only REG_EQUAL notes have their value
   rewritten; the rest of the list is processed via XEXP (x, 1).  */
7371 if (REG_NOTE_KIND (x) == REG_EQUAL)
7372 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7374 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7380 rtx new = cse_process_notes (XEXP (x, 0), object);
7381 /* We don't substitute VOIDmode constants into these rtx,
7382 since they would impede folding. */
7383 if (GET_MODE (new) != VOIDmode)
7384 validate_change (object, &XEXP (x, 0), new, 0);
/* REG case: look up the register's quantity number.  */
7389 i = reg_qty[REGNO (x)];
7391 /* Return a constant or a constant register. */
7392 if (REGNO_QTY_VALID_P (REGNO (x))
7393 && qty_const[i] != 0
7394 && (CONSTANT_P (qty_const[i])
7395 || GET_CODE (qty_const[i]) == REG))
7397 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7402 /* Otherwise, canonicalize this register. */
7403 return canon_reg (x, NULL_RTX);
/* Default: recurse into each operand per the rtx format string.  */
7406 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7408 validate_change (object, &XEXP (x, i),
7409 cse_process_notes (XEXP (x, i), object), 0);
7414 /* Find common subexpressions between the end test of a loop and the beginning
7415 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7417 Often we have a loop where an expression in the exit test is used
7418 in the body of the loop. For example "while (*p) *q++ = *p++;".
7419 Because of the way we duplicate the loop exit test in front of the loop,
7420 however, we don't detect that common subexpression. This will be caught
7421 when global cse is implemented, but this is a quite common case.
7423 This function handles the most common cases of these common expressions.
7424 It is called after we have processed the basic block ending with the
7425 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7426 jumps to a label used only once. */
/* NOTE(review): elided listing -- declarations and braces are missing
   between the numbered lines.  */
7429 cse_around_loop (loop_start)
7434 struct table_elt *p;
7436 /* If the jump at the end of the loop doesn't go to the start, we don't
/* Scan backward over line-number notes to find what precedes
   LOOP_START; bail unless it is the NOTE_INSN_LOOP_BEG note.  */
7438 for (insn = PREV_INSN (loop_start);
7439 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7440 insn = PREV_INSN (insn))
7444 || GET_CODE (insn) != NOTE
7445 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7448 /* If the last insn of the loop (the end test) was an NE comparison,
7449 we will interpret it as an EQ comparison, since we fell through
7450 the loop. Any equivalences resulting from that comparison are
7451 therefore not valid and must be invalidated. */
7452 if (last_jump_equiv_class)
7453 for (p = last_jump_equiv_class->first_same_value; p;
7454 p = p->next_same_value)
7455 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7456 || GET_CODE (p->exp) == SUBREG)
7457 invalidate (p->exp);
7458 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7459 || GET_CODE (p->exp) == ZERO_EXTRACT)
7460 invalidate (XEXP (p->exp, 0));
7462 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7463 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7465 The only thing we do with SET_DEST is invalidate entries, so we
7466 can safely process each SET in order. It is slightly less efficient
7467 to do so, but we only want to handle the most common cases. */
7469 for (insn = NEXT_INSN (loop_start);
7470 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7471 && ! (GET_CODE (insn) == NOTE
7472 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7473 insn = NEXT_INSN (insn))
/* Hand each SET or CLOBBER (including PARALLEL members) to
   cse_set_around_loop for possible replacement / invalidation.  */
7475 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7476 && (GET_CODE (PATTERN (insn)) == SET
7477 || GET_CODE (PATTERN (insn)) == CLOBBER))
7478 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7479 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7480 && GET_CODE (PATTERN (insn)) == PARALLEL)
7481 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7482 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7483 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7484 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7489 /* Variable used for communications between the next two routines. */
7491 static struct write_data skipped_writes_memory;
7493 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7494 since they are done elsewhere. This function is called via note_stores. */
/* NOTE(review): elided listing -- the return type, parameter
   declarations, and an early-return body around line 7501 are missing
   from this view.  */
7497 invalidate_skipped_set (dest, set)
7501 if (GET_CODE (set) == CLOBBER
/* Record which categories of memory the store touches, accumulating
   into the file-scope skipped_writes_memory.  */
7508 if (GET_CODE (dest) == MEM)
7509 note_mem_written (dest, &skipped_writes_memory);
7511 /* There are times when an address can appear varying and be a PLUS
7512 during this scan when it would be a fixed address were we to know
7513 the proper equivalences. So promote "nonscalar" to be "all". */
7514 if (skipped_writes_memory.nonscalar)
7515 skipped_writes_memory.all = 1;
7517 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7518 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7520 else if (GET_CODE (dest) == STRICT_LOW_PART
7521 || GET_CODE (dest) == ZERO_EXTRACT)
7522 invalidate (XEXP (dest, 0));
7525 /* Invalidate all insns from START up to the end of the function or the
7526 next label. This is called when we wish to CSE around a block that is
7527 conditionally executed. */
/* NOTE(review): elided listing -- return type, parameter declaration,
   and braces are missing between the numbered lines.  */
7530 invalidate_skipped_block (start)
7534 static struct write_data init = {0, 0, 0, 0};
7535 static struct write_data everything = {0, 1, 1, 1};
7537 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7538 insn = NEXT_INSN (insn))
/* Skip non-insns (notes, barriers, etc.).  */
7540 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
/* Reset the per-insn accumulator shared with invalidate_skipped_set.  */
7543 skipped_writes_memory = init;
/* A call can clobber call-used registers and any memory.  */
7545 if (GET_CODE (insn) == CALL_INSN)
7547 invalidate_for_call ();
7548 skipped_writes_memory = everything;
7551 note_stores (PATTERN (insn), invalidate_skipped_set);
7552 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7556 /* Used for communication between the following two routines; contains a
7557 value to be checked for modification. */
7559 static rtx cse_check_loop_start_value;
7561 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7562 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
/* NOTE(review): elided listing -- return type, parameter declarations,
   and an early return around line 7570 are missing from this view.
   Called via note_stores; SET appears unused in the visible lines.  */
7565 cse_check_loop_start (x, set)
7569 if (cse_check_loop_start_value == 0
7570 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
/* A MEM store may alias a MEM value; a register store conflicts only
   if the registers overlap the tracked value.  */
7573 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7574 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7575 cse_check_loop_start_value = 0;
7578 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7579 a loop that starts with the label at LOOP_START.
7581 If X is a SET, we see if its SET_SRC is currently in our hash table.
7582 If so, we see if it has a value equal to some register used only in the
7583 loop exit code (as marked by jump.c).
7585 If those two conditions are true, we search backwards from the start of
7586 the loop to see if that same value was loaded into a register that still
7587 retains its value at the start of the loop.
7589 If so, we insert an insn after the load to copy the destination of that
7590 load into the equivalent register and (try to) replace our SET_SRC with that
7593 In any event, we invalidate whatever this SET or CLOBBER modifies. */
/* NOTE(review): elided listing -- return type, parameter declarations,
   braces, and some statements are missing between the numbered lines.  */
7596 cse_set_around_loop (x, insn, loop_start)
7601 struct table_elt *src_elt;
7602 static struct write_data init = {0, 0, 0, 0};
7603 struct write_data writes_memory;
7605 writes_memory = init;
7607 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7608 are setting PC or CC0 or whose SET_SRC is already a register. */
7609 if (GET_CODE (x) == SET
7610 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
7611 && GET_CODE (SET_SRC (x)) != REG)
7613 src_elt = lookup (SET_SRC (x),
7614 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
7615 GET_MODE (SET_DEST (x)));
/* Look through the equivalence class for a cheaper loop-exit-test
   register (marked by jump.c via REG_LOOP_TEST_P).  */
7618 for (src_elt = src_elt->first_same_value; src_elt;
7619 src_elt = src_elt->next_same_value)
7620 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
7621 && COST (src_elt->exp) < COST (SET_SRC (x)))
7625 /* Look for an insn in front of LOOP_START that sets
7626 something in the desired mode to SET_SRC (x) before we hit
7627 a label or CALL_INSN. */
7629 for (p = prev_nonnote_insn (loop_start);
7630 p && GET_CODE (p) != CALL_INSN
7631 && GET_CODE (p) != CODE_LABEL;
7632 p = prev_nonnote_insn (p))
7633 if ((set = single_set (p)) != 0
7634 && GET_CODE (SET_DEST (set)) == REG
7635 && GET_MODE (SET_DEST (set)) == src_elt->mode
7636 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
7638 /* We now have to ensure that nothing between P
7639 and LOOP_START modified anything referenced in
7640 SET_SRC (x). We know that nothing within the loop
7641 can modify it, or we would have invalidated it in
/* cse_check_loop_start_value is cleared by cse_check_loop_start
   (via note_stores) if any intervening store conflicts.  */
7645 cse_check_loop_start_value = SET_SRC (x);
7646 for (q = p; q != loop_start; q = NEXT_INSN (q))
7647 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
7648 note_stores (PATTERN (q), cse_check_loop_start);
7650 /* If nothing was changed and we can replace our
7651 SET_SRC, add an insn after P to copy its destination
7652 to what we will be replacing SET_SRC with. */
7653 if (cse_check_loop_start_value
7654 && validate_change (insn, &SET_SRC (x),
7656 emit_insn_after (gen_move_insn (src_elt->exp,
7664 /* Now invalidate anything modified by X. */
7665 note_mem_written (SET_DEST (x), &writes_memory);
7667 if (writes_memory.var)
7668 invalidate_memory (&writes_memory);
7670 /* See comment on similar code in cse_insn for explanation of these tests. */
7671 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
7672 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
7673 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
7674 invalidate (SET_DEST (x));
7675 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7676 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
7677 invalidate (XEXP (SET_DEST (x), 0));
7680 /* Find the end of INSN's basic block and return its range,
7681 the total number of SETs in all the insns of the block, the last insn of the
7682 block, and the branch path.
7684 The branch path indicates which branches should be followed. If a non-zero
7685 path size is specified, the block should be rescanned and a different set
7686 of branches will be taken. The branch path is only used if
7687 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
7689 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
7690 used to describe the block. It is filled in with the information about
7691 the current block. The incoming structure's branch path, if any, is used
7692 to construct the output branch path. */
/* NOTE(review): elided listing -- return type, parameter declarations,
   local declarations (p, q, i, tmp, path_entry, nsets), and many braces
   are missing between the numbered lines.  */
7695 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
7697 struct cse_basic_block_data *data;
7704 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
7705 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
7706 int path_size = data->path_size;
7710 /* Update the previous branch path, if any. If the last branch was
7711 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
7712 shorten the path by one and look at the previous branch. We know that
7713 at least one branch must have been taken if PATH_SIZE is non-zero. */
7714 while (path_size > 0)
7716 if (data->path[path_size - 1].status != NOT_TAKEN)
7718 data->path[path_size - 1].status = NOT_TAKEN;
7725 /* Scan to end of this basic block. */
7726 while (p && GET_CODE (p) != CODE_LABEL)
7728 /* Don't cse out the end of a loop. This makes a difference
7729 only for the unusual loops that always execute at least once;
7730 all other loops have labels there so we will stop in any case.
7731 Cse'ing out the end of the loop is dangerous because it
7732 might cause an invariant expression inside the loop
7733 to be reused after the end of the loop. This would make it
7734 hard to move the expression out of the loop in loop.c,
7735 especially if it is one of several equivalent expressions
7736 and loop.c would like to eliminate it.
7738 If we are running after loop.c has finished, we can ignore
7739 the NOTE_INSN_LOOP_END. */
7741 if (! after_loop && GET_CODE (p) == NOTE
7742 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
7745 /* Don't cse over a call to setjmp; on some machines (eg vax)
7746 the regs restored by the longjmp come from
7747 a later time than the setjmp. */
7748 if (GET_CODE (p) == NOTE
7749 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
7752 /* A PARALLEL can have lots of SETs in it,
7753 especially if it is really an ASM_OPERANDS. */
7754 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
7755 && GET_CODE (PATTERN (p)) == PARALLEL)
7756 nsets += XVECLEN (PATTERN (p), 0);
7757 else if (GET_CODE (p) != NOTE)
7760 /* Ignore insns made by CSE; they cannot affect the boundaries of
7763 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
7764 high_cuid = INSN_CUID (p);
7765 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
7766 low_cuid = INSN_CUID (p);
7768 /* See if this insn is in our branch path. If it is and we are to
7770 if (path_entry < path_size && data->path[path_entry].branch == p)
7772 if (data->path[path_entry].status != NOT_TAKEN)
7775 /* Point to next entry in path, if any. */
7779 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
7780 was specified, we haven't reached our maximum path length, there are
7781 insns following the target of the jump, this is the only use of the
7782 jump label, and the target label is preceded by a BARRIER.
7784 Alternatively, we can follow the jump if it branches around a
7785 block of code and there are no other branches into the block.
7786 In this case invalidate_skipped_block will be called to invalidate any
7787 registers set in the block when following the jump. */
7789 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
7790 && GET_CODE (p) == JUMP_INSN
7791 && GET_CODE (PATTERN (p)) == SET
7792 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
7793 && LABEL_NUSES (JUMP_LABEL (p)) == 1
7794 && NEXT_INSN (JUMP_LABEL (p)) != 0)
/* Scan backward from the jump target, skipping ignorable notes and
   unused labels, to see what precedes the target.  */
7796 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
7797 if ((GET_CODE (q) != NOTE
7798 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
7799 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
7800 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
7803 /* If we ran into a BARRIER, this code is an extension of the
7804 basic block when the branch is taken. */
7805 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
7807 /* Don't allow ourself to keep walking around an
7808 always-executed loop. */
7809 if (next_real_insn (q) == next)
7815 /* Similarly, don't put a branch in our path more than once. */
7816 for (i = 0; i < path_entry; i++)
7817 if (data->path[i].branch == p)
7820 if (i != path_entry)
7823 data->path[path_entry].branch = p;
7824 data->path[path_entry++].status = TAKEN;
7826 /* This branch now ends our path. It was possible that we
7827 didn't see this branch the last time around (when the
7828 insn in front of the target was a JUMP_INSN that was
7829 turned into a no-op). */
7830 path_size = path_entry;
7833 /* Mark block so we won't scan it again later. */
7834 PUT_MODE (NEXT_INSN (p), QImode);
7836 /* Detect a branch around a block of code. */
7837 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
7841 if (next_real_insn (q) == next)
7847 for (i = 0; i < path_entry; i++)
7848 if (data->path[i].branch == p)
7851 if (i != path_entry)
7854 /* This is no_labels_between_p (p, q) with an added check for
7855 reaching the end of a function (in case Q precedes P). */
7856 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
7857 if (GET_CODE (tmp) == CODE_LABEL)
/* Record the skipped-over branch as AROUND in the path.  */
7862 data->path[path_entry].branch = p;
7863 data->path[path_entry++].status = AROUND;
7865 path_size = path_entry;
7868 /* Mark block so we won't scan it again later. */
7869 PUT_MODE (NEXT_INSN (p), QImode);
/* Fill in the output description of the block.  */
7876 data->low_cuid = low_cuid;
7877 data->high_cuid = high_cuid;
7878 data->nsets = nsets;
7881 /* If all jumps in the path are not taken, set our path length to zero
7882 so a rescan won't be done. */
7883 for (i = path_size - 1; i >= 0; i--)
7884 if (data->path[i].status != NOT_TAKEN)
7888 data->path_size = 0;
7890 data->path_size = path_size;
7892 /* End the current branch path. */
7893 data->path[path_size].branch = 0;
7896 /* Perform cse on the instructions of a function.
7897 F is the first instruction.
7898 NREGS is one plus the highest pseudo-reg number used in the instruction.
7900 AFTER_LOOP is 1 if this is the cse call done after loop optimization
7901 (only if -frerun-cse-after-loop).
7903 Returns 1 if jump_optimize should be redone due to simplifications
7904 in conditional jump instructions. */
/* NOTE(review): elided listing -- return type, remaining parameter
   declarations, braces, and several statements are missing between
   the numbered lines.  */
7907 cse_main (f, nregs, after_loop, file)
7913 struct cse_basic_block_data val;
7914 register rtx insn = f;
7917 cse_jumps_altered = 0;
7918 constant_pool_entries_cost = 0;
/* Per-invocation working arrays; alloca'd, so freed automatically
   on return.  */
7925 all_minus_one = (int *) alloca (nregs * sizeof (int));
7926 consec_ints = (int *) alloca (nregs * sizeof (int));
7928 for (i = 0; i < nregs; i++)
7930 all_minus_one[i] = -1;
7934 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
7935 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
7936 reg_qty = (int *) alloca (nregs * sizeof (int));
7937 reg_in_table = (int *) alloca (nregs * sizeof (int));
7938 reg_tick = (int *) alloca (nregs * sizeof (int));
7940 /* Discard all the free elements of the previous function
7941 since they are allocated in the temporarily obstack. */
7942 bzero (table, sizeof table);
7943 free_element_chain = 0;
7944 n_elements_made = 0;
7946 /* Find the largest uid. */
7948 max_uid = get_max_uid ();
7949 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
7950 bzero (uid_cuid, (max_uid + 1) * sizeof (int));
7952 /* Compute the mapping from uids to cuids.
7953 CUIDs are numbers assigned to insns, like uids,
7954 except that cuids increase monotonically through the code.
7955 Don't assign cuids to line-number NOTEs, so that the distance in cuids
7956 between two insns is not affected by -g. */
7958 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
7960 if (GET_CODE (insn) != NOTE
7961 || NOTE_LINE_NUMBER (insn) < 0)
7962 INSN_CUID (insn) = ++i;
7964 /* Give a line number note the same cuid as preceding insn. */
7965 INSN_CUID (insn) = i;
7968 /* Initialize which registers are clobbered by calls. */
7970 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
7972 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7973 if ((call_used_regs[i]
7974 /* Used to check !fixed_regs[i] here, but that isn't safe;
7975 fixed regs are still call-clobbered, and sched can get
7976 confused if they can "live across calls".
7978 The frame pointer is always preserved across calls. The arg
7979 pointer is if it is fixed. The stack pointer usually is, unless
7980 RETURN_POPS_ARGS, in which case an explicit CLOBBER
7981 will be present. If we are generating PIC code, the PIC offset
7982 table register is preserved across calls. */
7984 && i != STACK_POINTER_REGNUM
7985 && i != FRAME_POINTER_REGNUM
7986 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
7987 && i != HARD_FRAME_POINTER_REGNUM
7989 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
7990 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
7992 #ifdef PIC_OFFSET_TABLE_REGNUM
7993 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
7997 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
7999 /* Loop over basic blocks.
8000 Compute the maximum number of qty's needed for each basic block
8001 (which is 2 for each SET). */
8005 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8006 flag_cse_skip_blocks);
8008 /* If this basic block was already processed or has no sets, skip it. */
/* QImode on an insn is used as the "already scanned" mark; see the
   PUT_MODE calls in cse_end_of_basic_block.  */
8009 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8011 PUT_MODE (insn, VOIDmode);
8012 insn = (val.last ? NEXT_INSN (val.last) : 0);
8017 cse_basic_block_start = val.low_cuid;
8018 cse_basic_block_end = val.high_cuid;
8019 max_qty = val.nsets * 2;
8022 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8023 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8026 /* Make MAX_QTY bigger to give us room to optimize
8027 past the end of this basic block, if that should prove useful. */
8033 /* If this basic block is being extended by following certain jumps,
8034 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8035 Otherwise, we start after this basic block. */
8036 if (val.path_size > 0)
8037 cse_basic_block (insn, val.last, val.path, 0);
8040 int old_cse_jumps_altered = cse_jumps_altered;
8043 /* When cse changes a conditional jump to an unconditional
8044 jump, we want to reprocess the block, since it will give
8045 us a new branch path to investigate. */
8046 cse_jumps_altered = 0;
8047 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8048 if (cse_jumps_altered == 0
8049 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8052 cse_jumps_altered |= old_cse_jumps_altered;
8060 /* Tell refers_to_mem_p that qty_const info is not available. */
8063 if (max_elements_made < n_elements_made)
8064 max_elements_made = n_elements_made;
8066 return cse_jumps_altered;
8069 /* Process a single basic block. FROM and TO and the limits of the basic
8070 block. NEXT_BRANCH points to the branch path when following jumps or
8071 a null path when not following jumps.
8073 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8074 loop. This is true when we are being called for the last time on a
8075 block and this CSE pass is before loop.c. */
8078 cse_basic_block (from, to, next_branch, around_loop)
8079 register rtx from, to;
8080 struct branch_path *next_branch;
8085 int in_libcall_block = 0;
8087 /* Each of these arrays is undefined before max_reg, so only allocate
8088 the space actually needed and adjust the start below. */
8090 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8091 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8092 qty_mode= (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8093 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8094 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8096 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8097 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8098 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8100 qty_first_reg -= max_reg;
8101 qty_last_reg -= max_reg;
8102 qty_mode -= max_reg;
8103 qty_const -= max_reg;
8104 qty_const_insn -= max_reg;
8105 qty_comparison_code -= max_reg;
8106 qty_comparison_qty -= max_reg;
8107 qty_comparison_const -= max_reg;
8111 /* TO might be a label. If so, protect it from being deleted. */
8112 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8115 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8117 register enum rtx_code code;
8119 /* See if this is a branch that is part of the path. If so, and it is
8120 to be taken, do so. */
8121 if (next_branch->branch == insn)
8123 enum taken status = next_branch++->status;
8124 if (status != NOT_TAKEN)
8126 if (status == TAKEN)
8127 record_jump_equiv (insn, 1);
8129 invalidate_skipped_block (NEXT_INSN (insn));
8131 /* Set the last insn as the jump insn; it doesn't affect cc0.
8132 Then follow this branch. */
8137 insn = JUMP_LABEL (insn);
8142 code = GET_CODE (insn);
8143 if (GET_MODE (insn) == QImode)
8144 PUT_MODE (insn, VOIDmode);
8146 if (GET_RTX_CLASS (code) == 'i')
8148 /* Process notes first so we have all notes in canonical forms when
8149 looking for duplicate operations. */
8151 if (REG_NOTES (insn))
8152 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8154 /* Track when we are inside in LIBCALL block. Inside such a block,
8155 we do not want to record destinations. The last insn of a
8156 LIBCALL block is not considered to be part of the block, since
8157 its destination is the result of the block and hence should be
8160 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8161 in_libcall_block = 1;
8162 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8163 in_libcall_block = 0;
8165 cse_insn (insn, in_libcall_block);
8168 /* If INSN is now an unconditional jump, skip to the end of our
8169 basic block by pretending that we just did the last insn in the
8170 basic block. If we are jumping to the end of our block, show
8171 that we can have one usage of TO. */
8173 if (simplejump_p (insn))
8178 if (JUMP_LABEL (insn) == to)
8181 /* Maybe TO was deleted because the jump is unconditional.
8182 If so, there is nothing left in this basic block. */
8183 /* ??? Perhaps it would be smarter to set TO
8184 to whatever follows this insn,
8185 and pretend the basic block had always ended here. */
8186 if (INSN_DELETED_P (to))
8189 insn = PREV_INSN (to);
8192 /* See if it is ok to keep on going past the label
8193 which used to end our basic block. Remember that we incremented
8194 the count of that label, so we decrement it here. If we made
8195 a jump unconditional, TO_USAGE will be one; in that case, we don't
8196 want to count the use in that jump. */
8198 if (to != 0 && NEXT_INSN (insn) == to
8199 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8201 struct cse_basic_block_data val;
8203 insn = NEXT_INSN (to);
8205 if (LABEL_NUSES (to) == 0)
8208 /* Find the end of the following block. Note that we won't be
8209 following branches in this case. If TO was the last insn
8210 in the function, we are done. Similarly, if we deleted the
8211 insn after TO, it must have been because it was preceded by
8212 a BARRIER. In that case, we are done with this block because it
8213 has no continuation. */
8215 if (insn == 0 || INSN_DELETED_P (insn))
8220 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8222 /* If the tables we allocated have enough space left
8223 to handle all the SETs in the next basic block,
8224 continue through it. Otherwise, return,
8225 and that block will be scanned individually. */
8226 if (val.nsets * 2 + next_qty > max_qty)
8229 cse_basic_block_start = val.low_cuid;
8230 cse_basic_block_end = val.high_cuid;
8233 /* Prevent TO from being deleted if it is a label. */
8234 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8237 /* Back up so we process the first insn in the extension. */
8238 insn = PREV_INSN (insn);
8242 if (next_qty > max_qty)
8245 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8246 the previous insn is the only insn that branches to the head of a loop,
8247 we can cse into the loop. Don't do this if we changed the jump
8248 structure of a loop unless we aren't going to be following jumps. */
8250 if ((cse_jumps_altered == 0
8251 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8252 && around_loop && to != 0
8253 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8254 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8255 && JUMP_LABEL (PREV_INSN (to)) != 0
8256 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8257 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8259 return to ? NEXT_INSN (to) : 0;
8262 /* Count the number of times registers are used (not set) in X.
8263 COUNTS is an array in which we accumulate the count, INCR is how much
8264 we count each register usage. */
/* count_reg_usage: walk the rtl expression X and, for each REG found,
   accumulate INCR into COUNTS[REGNO (x)].  Registers are counted when
   used, not when set: a plain-REG SET_DEST is deliberately not scanned.
   NOTE(review): this excerpt is elided -- the K&R parameter declarations,
   the switch over CODE, and its case labels fall in missing numbered
   lines; confirm the dispatch structure against the full file.  */
8267 count_reg_usage (x, counts, incr)
8272   enum rtx_code code = GET_CODE (x);
/* Presumably the REG case: record INCR uses of this register -- TODO
   confirm the surrounding (elided) case label.  */
8279       counts[REGNO (x)] += incr;
8293       /* Unless we are setting a REG, count everything in SET_DEST. */
8294       if (GET_CODE (SET_DEST (x)) != REG)
8295       count_reg_usage (SET_DEST (x), counts, incr);
8296       count_reg_usage (SET_SRC (x), counts, incr);
/* Scan the body of an insn for register uses (enclosing case elided).  */
8302       count_reg_usage (PATTERN (x), counts, incr);
8304     /* Things used in a REG_EQUAL note aren't dead since loop may try to
/* The comment above is truncated by the elision; the insn's notes are
   scanned for uses as well.  */
8308       count_reg_usage (REG_NOTES (x), counts, incr);
/* Note-list handling: count the note's datum only for REG_EQUAL notes,
   then always recurse down the list tail (XEXP (x, 1)).  */
8313       if (REG_NOTE_KIND (x) == REG_EQUAL)
8314       count_reg_usage (XEXP (x, 0), counts, incr);
8316       count_reg_usage (XEXP (x, 1), counts, incr);
/* Generic fallback: recurse over every rtx ('e') and rtx-vector ('E')
   operand of X, as described by its format string.  */
8320   fmt = GET_RTX_FORMAT (code);
8321   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8324       count_reg_usage (XEXP (x, i), counts, incr);
8325     else if (fmt[i] == 'E')
8326       for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8327         count_reg_usage (XVECEXP (x, i, j), counts, incr);
8331 /* Scan all the insns and delete any that are dead; i.e., they store a register
8332 that is never used or they copy a register to itself.
8334 This is used to remove insns made obviously dead by cse. It improves the
8335 heuristics in loop since it won't try to move dead invariants out of loops
8336 or make givs for dead quantities. The remaining passes of the compilation
8337 are also sped up. */
8340 delete_dead_from_cse (insns, nreg)
8344 int *counts = (int *) alloca (nreg * sizeof (int));
8350 /* First count the number of times each register is used. */
8351 bzero (counts, sizeof (int) * nreg);
8352 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8353 count_reg_usage (insn, counts, 1);
8355 /* Go from the last insn to the first and delete insns that only set unused
8356 registers or copy a register to itself. As we delete an insn, remove
8357 usage counts for registers it uses. */
8358 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8362 prev = prev_real_insn (insn);
8364 /* Don't delete any insns that are part of a libcall block.
8365 Flow or loop might get confused if we did that. Remember
8366 that we are scanning backwards. */
8367 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8372 else if (GET_CODE (PATTERN (insn)) == SET)
8374 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8375 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8379 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8380 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8381 && ((tem = next_nonnote_insn (insn)) == 0
8382 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8383 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8386 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8387 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8388 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8389 || side_effects_p (SET_SRC (PATTERN (insn))))
8392 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8393 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8395 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8397 if (GET_CODE (elt) == SET)
8399 if (GET_CODE (SET_DEST (elt)) == REG
8400 && SET_DEST (elt) == SET_SRC (elt))
8404 else if (GET_CODE (SET_DEST (elt)) == CC0
8405 && ! side_effects_p (SET_SRC (elt))
8406 && ((tem = next_nonnote_insn (insn)) == 0
8407 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8408 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8411 else if (GET_CODE (SET_DEST (elt)) != REG
8412 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8413 || counts[REGNO (SET_DEST (elt))] != 0
8414 || side_effects_p (SET_SRC (elt)))
8417 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8423 /* If this is a dead insn, delete it and show registers in it aren't
8428 count_reg_usage (insn, counts, -1);
8432 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))