/* Move constant computations out of loops.
   Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
#include "insn-config.h"
#include "insn-flags.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

/* 1 + largest uid of any insn.  */

/* 1 + luid of last insn.  */
/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;
/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;
/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;
/* Nonzero if there is a subroutine call in the current loop.
   (unknown_address_altered is also nonzero in this case.)  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;
/* loop_continue is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static short *n_times_set;
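/* Illustrative scenario (hypothetical register number, not from the
   original sources): if pseudo 100 is set exactly once, from a constant,
   scan_loop first records n_times_set[100] = 1, then marks the insn as
   a candidate with n_times_set[100] = -2 (known constant; -1 if not
   known constant).  If move_movables moves the insn, n_times_set[100]
   becomes 0; otherwise scan_loop restores it to 1 from n_times_used.  */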
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static short *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;
/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 20
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;
/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   regno_reg_rtx.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is a libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */
  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};
FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
static void replace_call_address ();
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();
/* Relative gain of eliminating various kinds of operations.  */

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */

  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx (REG, word_mode, 0);
  rtx pow2 = GEN_INT (32);

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  /* Free the objects we just allocated.  */

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

loop_optimize (f, dumpfile)
     /* f is the first instruction of a chain of insns for one function */

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  /* Count the number of loops.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE
        && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero (uid_luid, max_uid_for_loop * sizeof (int));
  bzero (uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) != NOTE
        || NOTE_LINE_NUMBER (insn) <= 0)
      uid_luid[INSN_UID (insn)] = ++i;
    else
      /* Give a line number note the same luid as preceding insn.  */
      uid_luid[INSN_UID (insn)] = i;
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
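  /* For example (hypothetical uids, not from the original sources): if
     the insns with uids 7 and 8 were deleted by cross-jumping, the two
     loops above leave uid_luid[7] and uid_luid[8] equal to uid_luid[6],
     so luid comparisons involving the deleted uids still refer to the
     right region of the function.  */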
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 max_reg_num ());

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    unroll_block_trees ();
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
scan_loop (loop_start, end, nregs)
  /* 1 if we are scanning insns that could be executed zero times.  */
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  /* Number of insns in the loop.  */
  /* The SET from an insn, if it is the only SET in the insn.  */
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;

  n_times_set = (short *) alloca (nregs * sizeof (short));
  n_times_used = (short *) alloca (nregs * sizeof (short));
  may_not_optimize = (char *) alloca (nregs);
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != end
       && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
       && (GET_CODE (p) != NOTE
           || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
               && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
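  /* Illustrative arithmetic (hypothetical count, not from the original
     sources): with 15 non-fixed registers, threshold is
     2 * (1 + 15) = 32 for a loop without calls and half that for a loop
     with calls.  move_movables later moves an insn when
     threshold * savings * lifetime >= insn_count, so a larger threshold
     permits motion in larger loops.  */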
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
          && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
          && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero (n_times_set, nregs * sizeof (short));
  bzero (may_not_optimize, nregs);

  reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
  bzero (reg_single_usage, nregs * sizeof (rtx));

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy (n_times_set, n_times_used, nregs * sizeof (short));

  if (loop_dump_stream)
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      fprintf (loop_dump_stream, "Continue at insn %d.\n",
               INSN_UID (loop_continue));
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
      /* At end of a straight-in loop, we are done.
         At end of a loop entered at the bottom, scan the top.  */

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! may_not_optimize[REGNO (SET_DEST (set))])
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;

          temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
          if (temp && CONSTANT_P (XEXP (temp, 0)))
            src = XEXP (temp, 0), move_insn = 1;
          if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
            {
              src = XEXP (temp, 0);
              /* A libcall block can use regs that don't appear in
                 the equivalent expression.  To move the libcall,
                 we must move those regs too.  */
              dependencies = libcall_other_reg (p, src);
            }
          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
          /* In order to move a register, we need to have one of three cases:
             (1) it is used only in the same basic block as the set
             (2) it is not a user variable and it is not used in the
                 exit test (this can cause the variable to be used
                 before it is set just like a user-variable).
             (3) the set is guaranteed to be executed once the loop starts,
                 and the reg is not used until after that.  */
          else if (! ((! maybe_never
                       && ! loop_reg_used_before_p (set, p, loop_start,
                      || (! REG_USERVAR_P (SET_DEST (set))
                          && ! REG_LOOP_TEST_P (SET_DEST (set)))
                      || reg_in_basic_block_p (p, SET_DEST (set))))
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (n_times_set[REGNO (SET_DEST (set))] == 1
                         = consec_sets_invariant_p (SET_DEST (set),
                                                    n_times_set[REGNO (SET_DEST (set))],
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (reg_single_usage && reg_single_usage[regno] != 0
                  && reg_single_usage[regno] != const0_rtx
                  && regno_first_uid[regno] == INSN_UID (p)
                  && (regno_last_uid[regno]
                      == INSN_UID (reg_single_usage[regno]))
                  && n_times_set[REGNO (SET_DEST (set))] == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
#ifdef SMALL_REGISTER_CLASSES
                  && ! (GET_CODE (SET_SRC (set)) == REG
                        && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
#endif
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           reg_single_usage[regno])
                  && no_labels_between_p (p, reg_single_usage[regno])
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           reg_single_usage[regno]))
                {
                  /* Replace any usage in a REG_EQUAL note.  */
                  REG_NOTES (reg_single_usage[regno])
                    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
                                   SET_DEST (set), SET_SRC (set));

                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  n_times_set[regno] = 0;
              m = (struct movable *) alloca (sizeof (struct movable));
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
              m->move_insn = move_insn;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end)
                           || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start));
              m->lifetime = (uid_luid[regno_last_uid[regno]]
                             - uid_luid[regno_first_uid[regno]]);
              m->savings = n_times_used[regno];
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              n_times_set[regno] = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              last_movable->next = m;

              /* Skip this insn, not checking REG_LIBCALL notes.  */
              p = next_nonnote_insn (p);
              /* Skip the consecutive insns, if there are any.  */
              p = skip_consec_insns (p, m->consec);
              /* Back up to the last insn of the consecutive group.  */
              p = prev_nonnote_insn (p);
              /* We must now reset m->move_insn, m->is_equiv, and possibly
                 m->set_src to correspond to the effects of all the
                 insns.  */
              temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
              if (temp)
                m->set_src = XEXP (temp, 0), m->move_insn = 1;

              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                m->set_src = XEXP (temp, 0), m->move_insn = 1;

              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (n_times_set[regno] == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->set_dest = SET_DEST (set);
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider a reg x that is conditionally set inside an
                     inner loop, nested in an outer loop that also uses x:
                     clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[regno_last_uid[regno]]
                                   > INSN_LUID (end))
                               || (uid_luid[regno_first_uid[regno]]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[regno_first_uid[regno]])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->lifetime = (uid_luid[regno_last_uid[regno]]
                                 - uid_luid[regno_first_uid[regno]]);
                  n_times_set[regno] = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  last_movable->next = m;
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* This code appears in three places, once in scan_loop, and twice
         in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier, then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      /* At the virtual top of a converted loop, insns are again known to
         be executed: logically, the loop begins here even though the exit
         code has been duplicated.  */
      else if (GET_CODE (p) == NOTE
               && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP)
        maybe_never = call_passed = 0;
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
                 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
                     insn_count, loop_start, end);
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;

  code = GET_CODE (in_this);

  if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
      && ! reg_mentioned_p (in_this, not_in_this))
    *output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      for (j = 0; j < XVECLEN (in_this, i); j++)
        record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);

      record_excess_regs (XEXP (in_this, i), not_in_this, output);
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

libcall_other_reg (insn, equiv)
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
      || GET_CODE (p) == CALL_INSN)
    record_excess_regs (PATTERN (p), equiv, &output);
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

reg_in_basic_block_p (insn, reg)
  int regno = REGNO (reg);

  if (regno_first_uid[regno] != INSN_UID (insn))

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    switch (GET_CODE (p))
        /* Ordinary insn: if this is the last use, we win.  */
        if (regno_last_uid[regno] == INSN_UID (p))

        /* Jump insn: if this is the last use, we win.  */
        if (regno_last_uid[regno] == INSN_UID (p))
        /* Otherwise, it's the end of the basic block, so we lose.  */

        /* It's the end of the basic block, so we lose.  */

  /* The "last use" doesn't follow the "first use"??  */
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

libcall_benefit (last)
  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;  /* Assume at least this many insns in a
                           library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

skip_consec_insns (insn, count)
  for (; count > 0; count--)
    {
      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);

      /* Check for earlier movables inside that range,
         and mark them invalid.  We cannot use LUIDs here because
         insns created by loop.c for prior loops don't have LUIDs.
         Rather than reject all such insns from movables, we just
         explicitly check each insn in the libcall (since invariant
         libcalls aren't that common).  */
      for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
        for (m1 = movables; m1 != m; m1 = m1->next)
          if (m1->insn == insn)
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == regno_last_uid[regno]

        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;
        rtx reg_note, reg_note1;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        for (m1 = movables; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              /* A reg used for zero-extending mustn't be eliminated.  */
              && (matched_regs[m1->regno]
                  /* Can combine regs with different modes loaded from the
                     same constant only if the modes are the same or
                     if both are integer modes with M wider or the same
                     width as M1.  The check for integer is redundant, but
                     safe, since the only case of differing destination
                     modes with equal sources is when both sources are
                     VOIDmode, i.e., CONST_INT.  */
                  (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                   || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                       && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                       && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                           >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                  /* See if the source of M1 says it matches M.  */
                  && ((GET_CODE (m1->set_src) == REG
                       && matched_regs[REGNO (m1->set_src)])
                      || rtx_equal_for_loop_p (m->set_src, m1->set_src,
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))

              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              matched_regs[m1->regno] = 1;
  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
            register struct movable *m1;
            int first = uid_luid[regno_first_uid[m->regno]];
            int last = uid_luid[regno_last_uid[m->regno]];

            /* First one: don't check for overlap, just record it.  */

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[regno_first_uid[m1->regno]] > last
                       || uid_luid[regno_last_uid[m1->regno]] < first))

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
/* Return 1 if regs X and Y will become the same if moved.  */

regs_match_p (x, y, movables)
     struct movable *movables;
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)

  for (my = movables; my; my = my->next)
    if (my->regno == yn)

          && ((mx->match == my->match && mx->match != 0)
              || mx == my->match));
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     struct movable *movables;
  register struct movable *m;
  register enum rtx_code code;

  if (x == 0 || y == 0)

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (x)
          && rtx_equal_p (m->set_src, y))

  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
           && CONSTANT_P (x))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (y)
          && rtx_equal_p (m->set_src, x))

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);
  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (XWINT (x, i) != XWINT (y, i))

        if (XINT (x, i) != XINT (y, i))

        /* Two vectors must have the same length.  */
        if (XVECLEN (x, i) != XVECLEN (y, i))

        /* And the corresponding elements must match.  */
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
                                    XVECEXP (y, i, j), movables) == 0)

        if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)

        if (strcmp (XSTR (x, i), XSTR (y, i)))

        /* These are just backpointers, so they don't matter.  */

        /* It is believed that rtx's at this level will never
           contain anything but integers and other rtx's,
           except for within LABEL_REFs and SYMBOL_REFs.  */
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

add_label_notes (x, insns)
{
  enum rtx_code code = GET_CODE (x);

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
         This is not necessary, since the tablejump references the same label.
         And if we did record them, flow.c would make worse code.  */
      if (next == 0
          || ! (GET_CODE (next) == JUMP_INSN
                && (GET_CODE (PATTERN (next)) == ADDR_VEC
                    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
        for (insn = insns; insn; insn = NEXT_INSN (insn))
          if (reg_mentioned_p (XEXP (x, 0), insn))
            REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
                                        REG_NOTES (insn));

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
  register struct movable *m;

  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero (reg_map, nregs * sizeof (rtx));
  for (m = movables; m; m = m->next)
      /* Describe this movable insn.  */

      if (loop_dump_stream)
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          fprintf (loop_dump_stream, "consec %d, ", m->consec);
          fprintf (loop_dump_stream, "cond ");
          fprintf (loop_dump_stream, "force ");
          fprintf (loop_dump_stream, "global ");
          fprintf (loop_dump_stream, "done ");
          fprintf (loop_dump_stream, "move-insn ");
          fprintf (loop_dump_stream, "matches %d ",
                   INSN_UID (m->match->insn));
          fprintf (loop_dump_stream, "forces %d ",
                   INSN_UID (m->forces->insn));
      /* Count movables.  Value used in heuristics in strength_reduce.  */

      /* Ignore the insn if it's already done (it matched something else).
         Otherwise, see if it is now safe to move.  */

      if (!m->done
          && (! m->cond
              || (1 == invariant_p (m->set_src)
                  && (m->dependencies == 0
                      || 1 == invariant_p (m->dependencies))
                  && (m->consec == 0
                      || 1 == consec_sets_invariant_p (m->set_dest,
          && (! m->forces || m->forces->done))
          int savings = m->savings;

          /* We have an insn that is safe to move.
             Compute its desirability.  */

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "savings %d ", savings);

          if (moved_once[regno])
              if (loop_dump_stream)
                fprintf (loop_dump_stream, "halved since already moved ");

          /* An insn MUST be moved if we already moved something else
             which is safe only if this one is moved too: that is,
             if already_moved[REGNO] is nonzero.  */

          /* An insn is desirable to move if the new lifetime of the
             register is no more than THRESHOLD times the old lifetime.
             If it's not desirable, it means the loop is so big
             that moving won't speed things up much,
             and it is liable to make register usage worse.  */

          /* It is also desirable to move if it can be moved at no
             extra cost because something else was already moved.  */

          if (already_moved[regno]
              || (threshold * savings * m->lifetime) >= insn_count
              || (m->forces && m->forces->done
                  && n_times_used[m->forces->regno] == 1))
              register struct movable *m1;

              /* Now move the insns that set the reg.  */

              if (m->partial && m->match)
                  /* Find the end of this chain of matching regs.
                     Thus, we load each reg in the chain from that one reg.
                     And that reg is loaded with 0 directly,
                     since it has ->match == 0.  */
                  for (m1 = m; m1->match; m1 = m1->match);
                  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
                                          SET_DEST (PATTERN (m1->insn)));
                  i1 = emit_insn_before (newpat, loop_start);

                  /* Mark the moved, invariant reg as being allowed to
                     share a hard reg with the other matching invariant.  */
                  REG_NOTES (i1) = REG_NOTES (m->insn);
                  r1 = SET_DEST (PATTERN (m->insn));
                  r2 = SET_DEST (PATTERN (m1->insn));
                  regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
                                            gen_rtx (EXPR_LIST, VOIDmode, r2,
                  delete_insn (m->insn);

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
              /* If we are to re-generate the item being moved with a
                 new move insn, first delete what we have and then emit
                 the move insn before the loop.  */
              else if (m->move_insn)
                  for (count = m->consec; count >= 0; count--)
                      /* If this is the first insn of a library call
                         sequence, skip to the end.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

                      /* If this is the last insn of a libcall sequence, then
                         delete every insn in the sequence except the last.
                         The last insn is handled in the normal manner.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                          temp = XEXP (temp, 0);
                            temp = delete_insn (temp);

                      p = delete_insn (p);

                  emit_move_insn (m->set_dest, m->set_src);
                  temp = get_insns ();

                  add_label_notes (m->set_src, temp);

                  i1 = emit_insns_before (temp, loop_start);
                  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                    REG_NOTES (i1)
                      = gen_rtx (EXPR_LIST,
                                 m->is_equiv ? REG_EQUIV : REG_EQUAL,
                                 m->set_src, REG_NOTES (i1));

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
              /* The more regs we move, the less we like moving them.  */

              for (count = m->consec; count >= 0; count--)
                  /* If first insn of libcall sequence, skip to end.  */
                  /* Do this at start of loop, since p is guaranteed to
                     be an insn here.  */
                  if (GET_CODE (p) != NOTE
                      && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

                  /* If last insn of libcall sequence, move all
                     insns except the last before the loop.  The last
                     insn is handled in the normal manner.  */
                  if (GET_CODE (p) != NOTE
                      && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                      rtx fn_address_insn = 0;

                      for (temp = XEXP (temp, 0); temp != p;
                           temp = NEXT_INSN (temp))
                          if (GET_CODE (temp) == NOTE)
                            continue;

                          body = PATTERN (temp);

                          /* Find the next insn after TEMP,
                             not counting USE or NOTE insns.  */
                          for (next = NEXT_INSN (temp); next != p;
                               next = NEXT_INSN (next))
                            if (! (GET_CODE (next) == INSN
                                   && GET_CODE (PATTERN (next)) == USE)
                                && GET_CODE (next) != NOTE)
                              break;

                          /* If that is the call, this may be the insn
                             that loads the function address.

                             Extract the function address from the insn
                             that loads it into a register.
                             If this insn was cse'd, we get incorrect code.

                             So emit a new move insn that copies the
                             function address into the register that the
                             call insn will use.  flow.c will delete any
                             redundant stores that we have created.  */
                          if (GET_CODE (next) == CALL_INSN
                              && GET_CODE (body) == SET
                              && GET_CODE (SET_DEST (body)) == REG
                              && (n = find_reg_note (temp, REG_EQUAL,
                              fn_reg = SET_SRC (body);
                              if (GET_CODE (fn_reg) != REG)
                                fn_reg = SET_DEST (body);
                              fn_address = XEXP (n, 0);
                              fn_address_insn = temp;

                          /* We have the call insn.
                             If it uses the register we suspect it might,
                             load it with the correct address directly.  */
                          if (GET_CODE (temp) == CALL_INSN
                              && reg_referenced_p (fn_reg, body))
                            emit_insn_after (gen_move_insn (fn_reg,

                          if (GET_CODE (temp) == CALL_INSN)
                            i1 = emit_call_insn_before (body, loop_start);
                          else
                            i1 = emit_insn_before (body, loop_start);
                          if (temp == fn_address_insn)
                            fn_address_insn = i1;
                          REG_NOTES (i1) = REG_NOTES (temp);
              if (m->savemode != VOIDmode)
                  /* P sets REG to zero; but we should clear only
                     the bits that are not covered by the mode
                     m->savemode.  */
                  rtx reg = m->set_dest;

                    (GET_MODE (reg), and_optab, reg,
                     GEN_INT ((((HOST_WIDE_INT) 1
                                << GET_MODE_BITSIZE (m->savemode)))
                     reg, 1, OPTAB_LIB_WIDEN);

                  emit_move_insn (reg, tem);
                  sequence = gen_sequence ();
                  i1 = emit_insn_before (sequence, loop_start);
              else if (GET_CODE (p) == CALL_INSN)
                i1 = emit_call_insn_before (PATTERN (p), loop_start);
              else
                i1 = emit_insn_before (PATTERN (p), loop_start);

              REG_NOTES (i1) = REG_NOTES (p);
              /* If there is a REG_EQUAL note present whose value is
                 not loop invariant, then delete it, since it may
                 cause problems with later optimization passes.
                 It is possible for cse to create such notes
                 like this as a result of record_jump_cond.  */

              if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
                  && ! invariant_p (XEXP (temp, 0)))
                remove_note (i1, temp);
              if (loop_dump_stream)
                fprintf (loop_dump_stream, " moved to %d",
                         INSN_UID (i1));

              /* This isn't needed because REG_NOTES is copied
                 below and is wrong since P might be a PARALLEL.  */
              if (REG_NOTES (i1) == 0
                  && ! m->partial  /* But not if it's a zero-extend clr.  */
                  && ! m->global   /* and not if used outside the loop
                                      (since it might get set outside).  */
                  && CONSTANT_P (SET_SRC (PATTERN (p))))
                REG_NOTES (i1)
                  = gen_rtx (EXPR_LIST, REG_EQUAL,
                             SET_SRC (PATTERN (p)), REG_NOTES (i1));

              /* If library call, now fix the REG_NOTES that contain
                 insn pointers, namely REG_LIBCALL on FIRST
                 and REG_RETVAL on I1.  */
              if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
                {
                  XEXP (temp, 0) = first;
                  temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
                  XEXP (temp, 0) = i1;
                }

              do p = NEXT_INSN (p);
              while (p && GET_CODE (p) == NOTE);
          /* The more regs we move, the less we like moving them.  */

          /* Any other movable that loads the same register
             MUST be moved.  */
          already_moved[regno] = 1;

          /* This reg has been moved out of one loop.  */
          moved_once[regno] = 1;

          /* The reg set here is now invariant.  */
          n_times_set[regno] = 0;

          /* Change the length-of-life info for the register
             to say it lives at least the full length of this loop.
             This will help guide optimizations in outer loops.  */

          if (uid_luid[regno_first_uid[regno]] > INSN_LUID (loop_start))
            /* This is the old insn before all the moved insns.
               We can't use the moved insn because it is out of range
               in uid_luid.  Only the old insns have luids.  */
            regno_first_uid[regno] = INSN_UID (loop_start);
          if (uid_luid[regno_last_uid[regno]] < INSN_LUID (end))
            regno_last_uid[regno] = INSN_UID (end);
          /* Combine with this moved insn any other matching movables.  */

          for (m1 = movables; m1; m1 = m1->next)
                /* Schedule the reg loaded by M1
                   for replacement so that it shares the reg of M.
                   If the modes differ (only possible in restricted
                   circumstances), make a SUBREG.  */
                if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
                  reg_map[m1->regno] = m->set_dest;
                else
                  reg_map[m1->regno]
                    = gen_lowpart_common (GET_MODE (m1->set_dest),

                /* Get rid of the matching insn
                   and prevent further processing of it.  */

                /* if library call, delete all insns except the last,
                   which is deleted below.  */
                if (temp = find_reg_note (m1->insn, REG_RETVAL,
                    for (temp = XEXP (temp, 0); temp != m1->insn;
                         temp = NEXT_INSN (temp))

                delete_insn (m1->insn);

                /* Any other movable that loads the same register
                   MUST be moved.  */
                already_moved[m1->regno] = 1;

                /* The reg merged here is now invariant,
                   if the reg it matches is invariant.  */
                  n_times_set[m1->regno] = 0;
      else if (loop_dump_stream)
        fprintf (loop_dump_stream, "not desirable");
      else if (loop_dump_stream && !m->match)
        fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\n");

  new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
        || GET_CODE (p) == CALL_INSN)
      {
        replace_regs (PATTERN (p), reg_map, nregs, 0);
        replace_regs (REG_NOTES (p), reg_map, nregs, 0);
      }
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
  register enum rtx_code code;

  code = GET_CODE (x);

      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);

      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);

      /* If this MEM uses a reg other than the one we expected,
         something is wrong.  */
      if (XEXP (x, 0) != reg)

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      replace_call_address (XEXP (x, i), reg, addr);

      for (j = 0; j < XVECLEN (x, i); j++)
        replace_call_address (XVECEXP (x, i, j), reg, addr);
/* Return the number of memory refs to addresses that vary
   within X.  */

count_nonfixed_reads (x)
  register enum rtx_code code;

  code = GET_CODE (x);

      return ((invariant_p (XEXP (x, 0)) != 1)
              + count_nonfixed_reads (XEXP (x, 0)));

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      value += count_nonfixed_reads (XEXP (x, i));

      for (j = 0; j < XVECLEN (x, i); j++)
        value += count_nonfixed_reads (XVECEXP (x, i, j));
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */
2089 constant_high_bytes (p, loop_start)
2093 register int insn_code_number;
2095 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2096 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2098 new = gen_rtx (SET, VOIDmode,
2099 gen_rtx (STRICT_LOW_PART, VOIDmode,
2100 gen_rtx (SUBREG, GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2101 SET_DEST (PATTERN (p)),
2103 XEXP (SET_SRC (PATTERN (p)), 0));
2104 insn_code_number = recog (new, p);
2106 if (insn_code_number)
2110 /* Clear destination register before the loop. */
2111 emit_insn_before (gen_rtx (SET, VOIDmode,
2112 SET_DEST (PATTERN (p)),
2116 /* Inside the loop, just load the low part. */
2122 /* Scan a loop setting the variables `unknown_address_altered',
2123 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2124 and `loop_has_volatile'.
2125 Also, fill in the array `loop_store_mems'. */
2128 prescan_loop (start, end)
2131 register int level = 1;
2134 unknown_address_altered = 0;
2136 loop_has_volatile = 0;
2137 loop_store_mems_idx = 0;
2143 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2144 insn = NEXT_INSN (insn))
2146 if (GET_CODE (insn) == NOTE)
2148 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2151 /* Count number of loops contained in this one. */
2154 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2163 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2166 loop_continue = insn;
2169 else if (GET_CODE (insn) == CALL_INSN)
2171 unknown_address_altered = 1;
2176 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2178 if (volatile_refs_p (PATTERN (insn)))
2179 loop_has_volatile = 1;
2181 note_stores (PATTERN (insn), note_addr_stored);
2187 /* Scan the function looking for loops. Record the start and end of each loop.
2188 Also mark as invalid any loops that contain a setjmp or are branched
2189 to from outside the loop. */
2192 find_and_verify_loops (f)
2196 int current_loop = -1;
2200 /* If there are jumps to undefined labels,
2201 treat them as jumps out of any/all loops.
2202 This also avoids writing past end of tables when there are no loops. */
2203 uid_loop_num[0] = -1;
2205 /* Find boundaries of loops, mark which loops are contained within
2206 loops, and invalidate loops that have setjmp. */
2208 for (insn = f; insn; insn = NEXT_INSN (insn))
2210 if (GET_CODE (insn) == NOTE)
2211 switch (NOTE_LINE_NUMBER (insn))
2213 case NOTE_INSN_LOOP_BEG:
2214 loop_number_loop_starts[++next_loop] = insn;
2215 loop_number_loop_ends[next_loop] = 0;
2216 loop_outer_loop[next_loop] = current_loop;
2217 loop_invalid[next_loop] = 0;
2218 loop_number_exit_labels[next_loop] = 0;
2219 current_loop = next_loop;
2222 case NOTE_INSN_SETJMP:
2223 /* In this case, we must invalidate our current loop and any
2224 enclosing loops. */
2225 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2227 loop_invalid[loop] = 1;
2228 if (loop_dump_stream)
2229 fprintf (loop_dump_stream,
2230 "\nLoop at %d ignored due to setjmp.\n",
2231 INSN_UID (loop_number_loop_starts[loop]));
2235 case NOTE_INSN_LOOP_END:
2236 if (current_loop == -1)
2239 loop_number_loop_ends[current_loop] = insn;
2240 current_loop = loop_outer_loop[current_loop];
2245 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2246 enclosing loop, but this doesn't matter. */
2247 uid_loop_num[INSN_UID (insn)] = current_loop;
2250 /* Any loop containing a label used in an initializer must be invalidated,
2251 because it can be jumped into from anywhere. */
2253 for (label = forced_labels; label; label = XEXP (label, 1))
2257 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2259 loop_num = loop_outer_loop[loop_num])
2260 loop_invalid[loop_num] = 1;
2263 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2264 loop that it is not contained within, that loop is marked invalid.
2265 If any INSN or CALL_INSN uses a label's address, then the loop containing
2266 that label is marked invalid, because it could be jumped into from
2267 anywhere.
2269 Also look for blocks of code ending in an unconditional branch that
2270 exits the loop. If such a block is surrounded by a conditional
2271 branch around the block, move the block elsewhere (see below) and
2272 invert the jump to point to the code block. This may eliminate a
2273 label in our loop and will simplify processing by both us and a
2274 possible second cse pass. */
2276 for (insn = f; insn; insn = NEXT_INSN (insn))
2277 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2279 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2281 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2283 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2288 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2290 loop_num = loop_outer_loop[loop_num])
2291 loop_invalid[loop_num] = 1;
2295 if (GET_CODE (insn) != JUMP_INSN)
2298 mark_loop_jump (PATTERN (insn), this_loop_num);
2300 /* See if this is an unconditional branch outside the loop. */
2301 if (this_loop_num != -1
2302 && (GET_CODE (PATTERN (insn)) == RETURN
2303 || (simplejump_p (insn)
2304 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2306 && get_max_uid () < max_uid_for_loop)
2309 rtx our_next = next_real_insn (insn);
2311 /* Go backwards until we reach the start of the loop, a label,
2312 or a JUMP_INSN. */
2313 for (p = PREV_INSN (insn);
2314 GET_CODE (p) != CODE_LABEL
2315 && ! (GET_CODE (p) == NOTE
2316 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2317 && GET_CODE (p) != JUMP_INSN;
2321 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2322 we have a block of code to try to move.
2324 We look backward and then forward from the target of INSN
2325 to find a BARRIER at the same loop depth as the target.
2326 If we find such a BARRIER, we make a new label for the start
2327 of the block, invert the jump in P and point it to that label,
2328 and move the block of code to the spot we found. */
2330 if (GET_CODE (p) == JUMP_INSN
2331 && JUMP_LABEL (p) != 0
2332 /* Just ignore jumps to labels that were never emitted.
2333 These always indicate compilation errors. */
2334 && INSN_UID (JUMP_LABEL (p)) != 0
2336 && ! simplejump_p (p)
2337 && next_real_insn (JUMP_LABEL (p)) == our_next)
2339 rtx target
2340 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2341 int target_loop_num = uid_loop_num[INSN_UID (target)];
2344 for (loc = target; loc; loc = PREV_INSN (loc))
2345 if (GET_CODE (loc) == BARRIER
2346 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2350 for (loc = target; loc; loc = NEXT_INSN (loc))
2351 if (GET_CODE (loc) == BARRIER
2352 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2357 rtx cond_label = JUMP_LABEL (p);
2358 rtx new_label = get_label_after (p);
2360 /* Ensure our label doesn't go away. */
2361 LABEL_NUSES (cond_label)++;
2363 /* Verify that uid_loop_num is large enough and that
2364 we can invert P. */
2365 if (invert_jump (p, new_label))
2369 /* Include the BARRIER after INSN and copy the
2370 block after LOC. */
2371 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2372 reorder_insns (new_label, NEXT_INSN (insn), loc);
2374 /* All those insns are now in TARGET_LOOP_NUM. */
2375 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2377 uid_loop_num[INSN_UID (q)] = target_loop_num;
2379 /* The label jumped to by INSN is no longer a loop exit.
2380 Unless INSN does not have a label (e.g., it is a
2381 RETURN insn), search loop_number_exit_labels to find
2382 its label_ref, and remove it. Also turn off
2383 LABEL_OUTSIDE_LOOP_P bit. */
2384 if (JUMP_LABEL (insn))
2386 for (q = 0,
2387 r = loop_number_exit_labels[this_loop_num];
2388 r; q = r, r = LABEL_NEXTREF (r))
2389 if (XEXP (r, 0) == JUMP_LABEL (insn))
2391 LABEL_OUTSIDE_LOOP_P (r) = 0;
2393 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2395 loop_number_exit_labels[this_loop_num]
2396 = LABEL_NEXTREF (r);
2400 /* If we didn't find it, then something is wrong. */
2405 /* P is now a jump outside the loop, so it must be put
2406 in loop_number_exit_labels, and marked as such.
2407 The easiest way to do this is to just call
2408 mark_loop_jump again for P. */
2409 mark_loop_jump (PATTERN (p), this_loop_num);
2411 /* If INSN now jumps to the insn after it,
2412 delete INSN. */
2413 if (JUMP_LABEL (insn) != 0
2414 && (next_real_insn (JUMP_LABEL (insn))
2415 == next_real_insn (insn)))
2419 /* Continue the loop after where the conditional
2420 branch used to jump, since the only branch insn
2421 in the block (if it still remains) is an inter-loop
2422 branch and hence needs no processing. */
2423 insn = NEXT_INSN (cond_label);
2425 if (--LABEL_NUSES (cond_label) == 0)
2426 delete_insn (cond_label);
2428 /* This loop will be continued with NEXT_INSN (insn). */
2429 insn = PREV_INSN (insn);
2436 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2437 loops it is contained in, mark the target loop invalid.
2439 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2442 mark_loop_jump (x, loop_num)
2450 switch (GET_CODE (x))
2463 /* There could be a label reference in here. */
2464 mark_loop_jump (XEXP (x, 0), loop_num);
2471 mark_loop_jump (XEXP (x, 0), loop_num);
2472 mark_loop_jump (XEXP (x, 1), loop_num);
2477 mark_loop_jump (XEXP (x, 0), loop_num);
2481 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2483 /* Link together all labels that branch outside the loop. This
2484 is used by final_[bg]iv_value and the loop unrolling code. Also
2485 mark this LABEL_REF so we know that this branch should predict
2486 false. */
2488 if (dest_loop != loop_num && loop_num != -1)
2490 LABEL_OUTSIDE_LOOP_P (x) = 1;
2491 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2492 loop_number_exit_labels[loop_num] = x;
2495 /* If this is inside a loop, but not in the current loop or one enclosed
2496 by it, it invalidates at least one loop. */
2498 if (dest_loop == -1)
2501 /* We must invalidate every nested loop containing the target of this
2502 label, except those that also contain the jump insn. */
2504 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2506 /* Stop when we reach a loop that also contains the jump insn. */
2507 for (outer_loop = loop_num; outer_loop != -1;
2508 outer_loop = loop_outer_loop[outer_loop])
2509 if (dest_loop == outer_loop)
2512 /* If we get here, we know we need to invalidate a loop. */
2513 if (loop_dump_stream && ! loop_invalid[dest_loop])
2514 fprintf (loop_dump_stream,
2515 "\nLoop at %d ignored due to multiple entry points.\n",
2516 INSN_UID (loop_number_loop_starts[dest_loop]));
2518 loop_invalid[dest_loop] = 1;
2523 /* If this is not setting pc, ignore. */
2524 if (SET_DEST (x) == pc_rtx)
2525 mark_loop_jump (SET_SRC (x), loop_num);
2529 mark_loop_jump (XEXP (x, 1), loop_num);
2530 mark_loop_jump (XEXP (x, 2), loop_num);
2535 for (i = 0; i < XVECLEN (x, 0); i++)
2536 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2540 for (i = 0; i < XVECLEN (x, 1); i++)
2541 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2545 /* Treat anything else (such as a symbol_ref)
2546 as a branch out of this loop, but not into any loop. */
2550 LABEL_OUTSIDE_LOOP_P (x) = 1;
2551 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2552 loop_number_exit_labels[loop_num] = x;
2559 /* Return nonzero if there is a label in the range from
2560 insn INSN to and including the insn whose luid is END.
2561 INSN must have an assigned luid (i.e., it must not have
2562 been previously created by loop.c). */
2565 labels_in_range_p (insn, end)
2569 while (insn && INSN_LUID (insn) <= end)
2571 if (GET_CODE (insn) == CODE_LABEL)
2573 insn = NEXT_INSN (insn);
2579 /* Record that a memory reference X is being set. */
2582 note_addr_stored (x)
2587 if (x == 0 || GET_CODE (x) != MEM)
2590 /* Count number of memory writes.
2591 This affects heuristics in strength_reduce. */
2594 if (unknown_address_altered)
2597 for (i = 0; i < loop_store_mems_idx; i++)
2598 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2599 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2601 /* We are storing at the same address as previously noted. Save the
2602 wider reference, treating BLKmode as wider. */
2603 if (GET_MODE (x) == BLKmode
2604 || (GET_MODE_SIZE (GET_MODE (x))
2605 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i]))))
2606 loop_store_mems[i] = x;
2610 if (i == NUM_STORES)
2611 unknown_address_altered = 1;
2613 else if (i == loop_store_mems_idx)
2614 loop_store_mems[loop_store_mems_idx++] = x;
2617 /* Return nonzero if the rtx X is invariant over the current loop.
2619 The value is 2 if we refer to something only conditionally invariant.
2621 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2622 Otherwise, a memory ref is invariant if it does not conflict with
2623 anything stored in `loop_store_mems'. */
2630 register enum rtx_code code;
2632 int conditional = 0;
2636 code = GET_CODE (x);
2646 /* A LABEL_REF is normally invariant, however, if we are unrolling
2647 loops, and this label is inside the loop, then it isn't invariant.
2648 This is because each unrolled copy of the loop body will have
2649 a copy of this label. If this was invariant, then an insn loading
2650 the address of this label into a register might get moved outside
2651 the loop, and then each loop body would end up using the same label.
2653 We don't know the loop bounds here though, so just fail for all
2654 labels. */
2655 if (flag_unroll_loops)
2662 case UNSPEC_VOLATILE:
2666 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2667 since the reg might be set by initialization within the loop. */
2668 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2669 || x == arg_pointer_rtx)
2672 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2674 if (n_times_set[REGNO (x)] < 0)
2676 return n_times_set[REGNO (x)] == 0;
2679 /* Read-only items (such as constants in a constant pool) are
2680 invariant if their address is. */
2681 if (RTX_UNCHANGING_P (x))
2684 /* If we filled the table (or had a subroutine call), any location
2685 in memory could have been clobbered. */
2686 if (unknown_address_altered
2687 /* Don't mess with volatile memory references. */
2688 || MEM_VOLATILE_P (x))
2691 /* See if there is any dependence between a store and this load. */
2692 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2693 if (true_dependence (loop_store_mems[i], x))
2696 /* It's not invalidated by a store in memory
2697 but we must still verify the address is invariant. */
2701 /* Don't mess with insns declared volatile. */
2702 if (MEM_VOLATILE_P (x))
2706 fmt = GET_RTX_FORMAT (code);
2707 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2711 int tem = invariant_p (XEXP (x, i));
2717 else if (fmt[i] == 'E')
2720 for (j = 0; j < XVECLEN (x, i); j++)
2722 int tem = invariant_p (XVECEXP (x, i, j));
2732 return 1 + conditional;
2736 /* Return nonzero if all the insns in the loop that set REG
2737 are INSN and the immediately following insns,
2738 and if each of those insns sets REG in an invariant way
2739 (not counting uses of REG in them).
2741 The value is 2 if some of these insns are only conditionally invariant.
2743 We assume that INSN itself is the first set of REG
2744 and that its source is invariant. */
2747 consec_sets_invariant_p (reg, n_sets, insn)
2751 register rtx p = insn;
2752 register int regno = REGNO (reg);
2754 /* Number of sets we have to insist on finding after INSN. */
2755 int count = n_sets - 1;
2756 int old = n_times_set[regno];
2760 /* If N_SETS hit the limit, we can't rely on its value. */
2764 n_times_set[regno] = 0;
2768 register enum rtx_code code;
2772 code = GET_CODE (p);
2774 /* If library call, skip to end of it. */
2775 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2780 && (set = single_set (p))
2781 && GET_CODE (SET_DEST (set)) == REG
2782 && REGNO (SET_DEST (set)) == regno)
2784 this = invariant_p (SET_SRC (set));
2787 else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
2789 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
2790 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
2791 notes are OK. */
2792 this = (CONSTANT_P (XEXP (temp, 0))
2793 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
2794 && invariant_p (XEXP (temp, 0))));
2801 else if (code != NOTE)
2803 n_times_set[regno] = old;
2808 n_times_set[regno] = old;
2809 /* If invariant_p ever returned 2, we return 2. */
2810 return (value & 2) ? 2 : 1;
2814 /* I don't think this condition is sufficient to allow INSN
2815 to be moved, so we no longer test it. */
2817 /* Return 1 if all insns in the basic block of INSN and following INSN
2818 that set REG are invariant according to TABLE. */
2821 all_sets_invariant_p (reg, insn, table)
2825 register rtx p = insn;
2826 register int regno = REGNO (reg);
2830 register enum rtx_code code;
2832 code = GET_CODE (p);
2833 if (code == CODE_LABEL || code == JUMP_INSN)
2835 if (code == INSN && GET_CODE (PATTERN (p)) == SET
2836 && GET_CODE (SET_DEST (PATTERN (p))) == REG
2837 && REGNO (SET_DEST (PATTERN (p))) == regno)
2839 if (!invariant_p (SET_SRC (PATTERN (p)), table))
2846 /* Look at all uses (not sets) of registers in X. For each, if it is
2847 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
2848 a different insn, set USAGE[REGNO] to const0_rtx. */
2851 find_single_use_in_loop (insn, x, usage)
2856 enum rtx_code code = GET_CODE (x);
2857 char *fmt = GET_RTX_FORMAT (code);
2862 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
2863 ? const0_rtx : insn;
2865 else if (code == SET)
2867 /* Don't count SET_DEST if it is a REG; otherwise count things
2868 in SET_DEST because if a register is partially modified, it won't
2869 show up as a potential movable so we don't care how USAGE is set
2870 for it. */
2871 if (GET_CODE (SET_DEST (x)) != REG)
2872 find_single_use_in_loop (insn, SET_DEST (x), usage);
2873 find_single_use_in_loop (insn, SET_SRC (x), usage);
2876 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2878 if (fmt[i] == 'e' && XEXP (x, i) != 0)
2879 find_single_use_in_loop (insn, XEXP (x, i), usage);
2880 else if (fmt[i] == 'E')
2881 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2882 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
2886 /* Increment N_TIMES_SET at the index of each register
2887 that is modified by an insn between FROM and TO.
2888 If the value of an element of N_TIMES_SET becomes 127 or more,
2889 stop incrementing it, to avoid overflow.
2891 Store in SINGLE_USAGE[I] the single insn in which register I is
2892 used, if it is only used once. Otherwise, it is set to 0 (for no
2893 uses) or const0_rtx for more than one use. This parameter may be zero,
2894 in which case this processing is not done.
2896 Store in *COUNT_PTR the number of actual instructions
2897 in the loop. We use this to decide what is worth moving out. */
2899 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
2900 In that case, it is the insn that last set reg n. */
2903 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
2904 register rtx from, to;
2910 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
2912 register int count = 0;
2915 bzero (last_set, nregs * sizeof (rtx));
2916 for (insn = from; insn != to; insn = NEXT_INSN (insn))
2918 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2922 /* If requested, record registers that have exactly one use. */
2925 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
2927 /* Include uses in REG_EQUAL notes. */
2928 if (REG_NOTES (insn))
2929 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
2932 if (GET_CODE (PATTERN (insn)) == CLOBBER
2933 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
2934 /* Don't move a reg that has an explicit clobber.
2935 We might do so sometimes, but it's not worth the pain. */
2936 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
2938 if (GET_CODE (PATTERN (insn)) == SET
2939 || GET_CODE (PATTERN (insn)) == CLOBBER)
2941 dest = SET_DEST (PATTERN (insn));
2942 while (GET_CODE (dest) == SUBREG
2943 || GET_CODE (dest) == ZERO_EXTRACT
2944 || GET_CODE (dest) == SIGN_EXTRACT
2945 || GET_CODE (dest) == STRICT_LOW_PART)
2946 dest = XEXP (dest, 0);
2947 if (GET_CODE (dest) == REG)
2949 register int regno = REGNO (dest);
2950 /* If this is the first setting of this reg
2951 in current basic block, and it was set before,
2952 it must be set in two basic blocks, so it cannot
2953 be moved out of the loop. */
2954 if (n_times_set[regno] > 0 && last_set[regno] == 0)
2955 may_not_move[regno] = 1;
2956 /* If this is not first setting in current basic block,
2957 see if reg was used in between previous one and this.
2958 If so, neither one can be moved. */
2959 if (last_set[regno] != 0
2960 && reg_used_between_p (dest, last_set[regno], insn))
2961 may_not_move[regno] = 1;
2962 if (n_times_set[regno] < 127)
2963 ++n_times_set[regno];
2964 last_set[regno] = insn;
2967 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2970 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2972 register rtx x = XVECEXP (PATTERN (insn), 0, i);
2973 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
2974 /* Don't move a reg that has an explicit clobber.
2975 It's not worth the pain to try to do it correctly. */
2976 may_not_move[REGNO (XEXP (x, 0))] = 1;
2978 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
2980 dest = SET_DEST (x);
2981 while (GET_CODE (dest) == SUBREG
2982 || GET_CODE (dest) == ZERO_EXTRACT
2983 || GET_CODE (dest) == SIGN_EXTRACT
2984 || GET_CODE (dest) == STRICT_LOW_PART)
2985 dest = XEXP (dest, 0);
2986 if (GET_CODE (dest) == REG)
2988 register int regno = REGNO (dest);
2989 if (n_times_set[regno] > 0 && last_set[regno] == 0)
2990 may_not_move[regno] = 1;
2991 if (last_set[regno] != 0
2992 && reg_used_between_p (dest, last_set[regno], insn))
2993 may_not_move[regno] = 1;
2994 if (n_times_set[regno] < 127)
2995 ++n_times_set[regno];
2996 last_set[regno] = insn;
3002 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3003 bzero (last_set, nregs * sizeof (rtx));
3008 /* Given a loop that is bounded by LOOP_START and LOOP_END
3009 and that is entered at SCAN_START,
3010 return 1 if the register set in SET contained in insn INSN is used by
3011 any insn that precedes INSN in cyclic order starting
3012 from the loop entry point.
3014 We don't want to use INSN_LUID here because if we restrict INSN to those
3015 that have a valid INSN_LUID, it means we cannot move an invariant out
3016 from an inner loop past two loops. */
3019 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3020 rtx set, insn, loop_start, scan_start, loop_end;
3022 rtx reg = SET_DEST (set);
3025 /* Scan forward checking for register usage. If we hit INSN, we
3026 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3027 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3029 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3030 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3040 /* A "basic induction variable" or biv is a pseudo reg that is set
3041 (within this loop) only by incrementing or decrementing it. */
3042 /* A "general induction variable" or giv is a pseudo reg whose
3043 value is a linear function of a biv. */
3045 /* Bivs are recognized by `basic_induction_var';
3046 Givs by `general_induction_var'. */
3048 /* Indexed by register number, indicates whether or not register is an
3049 induction variable, and if so what type. */
3051 enum iv_mode *reg_iv_type;
3053 /* Indexed by register number, contains pointer to `struct induction'
3054 if register is an induction variable. This holds general info for
3055 all induction variables. */
3057 struct induction **reg_iv_info;
3059 /* Indexed by register number, contains pointer to `struct iv_class'
3060 if register is a basic induction variable. This holds info describing
3061 the class (a related group) of induction variables that the biv belongs
3064 struct iv_class **reg_biv_class;
3066 /* The head of a list which links together (via the next field)
3067 every iv class for the current loop. */
3069 struct iv_class *loop_iv_list;
3071 /* Communication with routines called via `note_stores'. */
3073 static rtx note_insn;
3075 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3077 static rtx addr_placeholder;
3079 /* ??? Unfinished optimizations, and possible future optimizations,
3080 for the strength reduction code. */
3082 /* ??? There is one more optimization you might be interested in doing: to
3083 allocate pseudo registers for frequently-accessed memory locations.
3084 If the same memory location is referenced each time around, it might
3085 be possible to copy it into a register before and out after.
3086 This is especially useful when the memory location is a variable which
3087 is in a stack slot because somewhere its address is taken. If the
3088 loop doesn't contain a function call and the variable isn't volatile,
3089 it is safe to keep the value in a register for the duration of the
3090 loop. One tricky thing is that the copying of the value back from the
3091 register has to be done on all exits from the loop. You need to check that
3092 all the exits from the loop go to the same place. */
3094 /* ??? The interaction of biv elimination, and recognition of 'constant'
3095 bivs, may cause problems. */
3097 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3098 performance problems.
3100 Perhaps don't eliminate things that can be combined with an addressing
3101 mode. Find all givs that have the same biv, mult_val, and add_val;
3102 then for each giv, check to see if its only use dies in a following
3103 memory address. If so, generate a new memory address and check to see
3104 if it is valid. If it is valid, then store the modified memory address,
3105 otherwise, mark the giv as not done so that it will get its own iv. */
3107 /* ??? Could try to optimize branches when it is known that a biv is always
3110 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3111 giv so that an optimized branch can still be recognized by the combiner,
3112 e.g. the VAX acb insn. */
3114 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3115 was rerun in loop_optimize whenever a register was added or moved.
3116 Also, some of the optimizations could be a little less conservative. */
3118 /* Perform strength reduction and induction variable elimination. */
3120 /* Pseudo registers created during this function will be beyond the last
3121 valid index in several tables including n_times_set and regno_last_uid.
3122 This does not cause a problem here, because the added registers cannot be
3123 givs outside of their loop, and hence will never be reconsidered.
3124 But scan_loop must check regnos to make sure they are in bounds. */
3127 strength_reduce (scan_start, end, loop_top, insn_count,
3128 loop_start, loop_end)
3141 /* This is 1 if current insn is not executed at least once for every loop
3142 iteration. */
3143 int not_every_iteration = 0;
3144 /* This is 1 if current insn may be executed more than once for every
3145 loop iteration. */
3146 int maybe_multiple = 0;
3147 /* Temporary list pointers for traversing loop_iv_list. */
3148 struct iv_class *bl, **backbl;
3149 /* Ratio of extra register life span we can justify
3150 for saving an instruction. More if loop doesn't call subroutines
3151 since in that case saving an insn makes more difference
3152 and more registers are available. */
3153 /* ??? could set this to last value of threshold in move_movables */
3154 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3155 /* Map of pseudo-register replacements. */
3159 rtx end_insert_before;
3161 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3162 * sizeof (enum iv_mode));
3163 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3164 reg_iv_info = (struct induction **)
3165 alloca (max_reg_before_loop * sizeof (struct induction *));
3166 bzero ((char *) reg_iv_info, (max_reg_before_loop
3167 * sizeof (struct induction *)));
3168 reg_biv_class = (struct iv_class **)
3169 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3170 bzero ((char *) reg_biv_class, (max_reg_before_loop
3171 * sizeof (struct iv_class *)));
3174 addr_placeholder = gen_reg_rtx (Pmode);
3176 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3177 must be put before this insn, so that they will appear in the right
3178 order (i.e. loop order).
3180 If loop_end is the end of the current function, then emit a
3181 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3182 NOTE_INSN_DELETED. */
3183 if (NEXT_INSN (loop_end) != 0)
3184 end_insert_before = NEXT_INSN (loop_end);
3186 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3188 /* Scan through loop to find all possible bivs. */
3194 /* At end of a straight-in loop, we are done.
3195 At end of a loop entered at the bottom, scan the top. */
3196 if (p == scan_start)
3204 if (p == scan_start)
3208 if (GET_CODE (p) == INSN
3209 && (set = single_set (p))
3210 && GET_CODE (SET_DEST (set)) == REG)
3212 dest_reg = SET_DEST (set);
3213 if (REGNO (dest_reg) < max_reg_before_loop
3214 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3215 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3217 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3218 dest_reg, p, &inc_val, &mult_val))
3220 /* It is a possible basic induction variable.
3221 Create and initialize an induction structure for it. */
3224 = (struct induction *) alloca (sizeof (struct induction));
3226 record_biv (v, p, dest_reg, inc_val, mult_val,
3227 not_every_iteration, maybe_multiple);
3228 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3230 else if (REGNO (dest_reg) < max_reg_before_loop)
3231 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3235 /* Past CODE_LABEL, we get to insns that may be executed multiple
3236 times. The only way we can be sure that they can't is if every
3237 jump insn between here and the end of the loop either
3238 returns, exits the loop, or is a forward jump. */
3240 if (GET_CODE (p) == CODE_LABEL)
3248 insn = NEXT_INSN (insn);
3249 if (insn == scan_start)
3257 if (insn == scan_start)
3261 if (GET_CODE (insn) == JUMP_INSN
3262 && GET_CODE (PATTERN (insn)) != RETURN
3263 && (! condjump_p (insn)
3264 || (JUMP_LABEL (insn) != 0
3265 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3266 || INSN_UID (insn) >= max_uid_for_loop
3267 || (INSN_LUID (JUMP_LABEL (insn))
3268 < INSN_LUID (insn))))))
3276 /* Past a label or a jump, we get to insns for which we can't count
3277 on whether or how many times they will be executed during each
3279 /* This code appears in three places, once in scan_loop, and twice
3280 in strength_reduce. */
3281 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3282 /* If we enter the loop in the middle, and scan around to the
3283 beginning, don't set not_every_iteration for that.
3284 This can be any kind of jump, since we want to know if insns
3285 will be executed if the loop is executed. */
3286 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3287 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3288 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3289 not_every_iteration = 1;
3291 /* At the virtual top of a converted loop, insns are again known to
3292 be executed each iteration: logically, the loop begins here
3293 even though the exit code has been duplicated. */
3295 else if (GET_CODE (p) == NOTE
3296 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP)
3297 not_every_iteration = 0;
3299 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3300 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3301 or not an insn is known to be executed each iteration of the
3302 loop, whether or not any iterations are known to occur.
3304 Therefore, if we have just passed a label and have no more labels
3305 between here and the test insn of the loop, we know these insns
3306 will be executed each iteration. This can also happen if we
3307 have just passed a jump, for example, when there are nested loops. */
3309 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3310 && no_labels_between_p (p, loop_end))
3311 not_every_iteration = 0;
3314 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3315 Make a sanity check against n_times_set. */
3316 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3318 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3319 /* Above happens if register modified by subreg, etc. */
3320 /* Make sure it is not recognized as a basic induction var: */
3321 || n_times_set[bl->regno] != bl->biv_count
3322 /* If never incremented, it is invariant that we decided not to
3323 move. So leave it alone. */
3324 || ! bl->incremented)
3326 if (loop_dump_stream)
3327 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3329 (reg_iv_type[bl->regno] != BASIC_INDUCT
3330 ? "not induction variable"
3331 : (! bl->incremented ? "never incremented"
3334 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3341 if (loop_dump_stream)
3342 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3346 /* Exit if there are no bivs. */
3349 /* Can still unroll the loop anyway, but indicate that there is no
3350 strength reduction info available. */
3351 if (flag_unroll_loops)
3352 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3357 /* Find initial value for each biv by searching backwards from loop_start,
3358 halting at first label. Also record any test condition. */
3361 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3365 if (GET_CODE (p) == CALL_INSN)
3368 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3369 || GET_CODE (p) == CALL_INSN)
3370 note_stores (PATTERN (p), record_initial);
3372 /* Record any test of a biv that branches around the loop if no store
3373 between it and the start of loop. We only care about tests with
3374 constants and registers and only certain of those. */
3375 if (GET_CODE (p) == JUMP_INSN
3376 && JUMP_LABEL (p) != 0
3377 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3378 && (test = get_condition_for_loop (p)) != 0
3379 && GET_CODE (XEXP (test, 0)) == REG
3380 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3381 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3382 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3383 && bl->init_insn == 0)
3385 /* If an NE test, we have an initial value! */
3386 if (GET_CODE (test) == NE)
3389 bl->init_set = gen_rtx (SET, VOIDmode,
3390 XEXP (test, 0), XEXP (test, 1));
3393 bl->initial_test = test;
3397 /* Look at each biv and see if we can say anything better about its
3398 initial value from any initializing insns set up above. (This is done
3399 in two passes to avoid missing SETs in a PARALLEL.) */
3400 for (bl = loop_iv_list; bl; bl = bl->next)
3404 if (! bl->init_insn)
3407 src = SET_SRC (bl->init_set);
3409 if (loop_dump_stream)
3410 fprintf (loop_dump_stream,
3411 "Biv %d initialized at insn %d: initial value ",
3412 bl->regno, INSN_UID (bl->init_insn));
3414 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3415 || GET_MODE (src) == VOIDmode)
3416 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3418 bl->initial_value = src;
3420 if (loop_dump_stream)
3422 if (GET_CODE (src) == CONST_INT)
3423 fprintf (loop_dump_stream, "%d\n", INTVAL (src));
3426 print_rtl (loop_dump_stream, src);
3427 fprintf (loop_dump_stream, "\n");
3433 /* Biv initial value is not simple move,
3434 so let it keep initial value of "itself". */
3436 if (loop_dump_stream)
3437 fprintf (loop_dump_stream, "is complex\n");
3441 /* Search the loop for general induction variables. */
3443 /* A register is a giv if: it is only set once, it is a function of a
3444 biv and a constant (or invariant), and it is not a biv. */
3446 not_every_iteration = 0;
3451 /* At end of a straight-in loop, we are done.
3452 At end of a loop entered at the bottom, scan the top. */
3453 if (p == scan_start)
3461 if (p == scan_start)
3465 /* Look for a general induction variable in a register. */
3466 if (GET_CODE (p) == INSN
3467 && (set = single_set (p))
3468 && GET_CODE (SET_DEST (set)) == REG
3469 && ! may_not_optimize[REGNO (SET_DEST (set))])
3477 dest_reg = SET_DEST (set);
3478 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3481 if (/* SET_SRC is a giv. */
3482 ((benefit = general_induction_var (SET_SRC (set),
3485 /* Equivalent expression is a giv. */
3486 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3487 && (benefit = general_induction_var (XEXP (regnote, 0),
3489 &add_val, &mult_val))))
3490 /* Don't try to handle any regs made by loop optimization.
3491 We have nothing on them in regno_first_uid, etc. */
3492 && REGNO (dest_reg) < max_reg_before_loop
3493 /* Don't recognize a BASIC_INDUCT_VAR here. */
3494 && dest_reg != src_reg
3495 /* This must be the only place where the register is set. */
3496 && (n_times_set[REGNO (dest_reg)] == 1
3497 /* or all sets must be consecutive and make a giv. */
3498 || (benefit = consec_sets_giv (benefit, p,
3500 &add_val, &mult_val))))
3504 = (struct induction *) alloca (sizeof (struct induction));
3507 /* If this is a library call, increase benefit. */
3508 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3509 benefit += libcall_benefit (p);
3511 /* Skip the consecutive insns, if there are any. */
3512 for (count = n_times_set[REGNO (dest_reg)] - 1;
3515 /* If first insn of libcall sequence, skip to end.
3516 Do this at start of loop, since INSN is guaranteed to
3517 be an insn here. */
3518 if (GET_CODE (p) != NOTE
3519 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3522 do p = NEXT_INSN (p);
3523 while (GET_CODE (p) == NOTE);
3526 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3527 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3533 #ifndef DONT_REDUCE_ADDR
3534 /* Look for givs which are memory addresses. */
3535 /* This resulted in worse code on a VAX 8600. I wonder if it
3536 still does. */
3537 if (GET_CODE (p) == INSN)
3538 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3542 /* Update the status of whether giv can derive other givs. This can
3543 change when we pass a label or an insn that updates a biv. */
3544 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3545 || GET_CODE (p) == CODE_LABEL)
3546 update_giv_derive (p);
3548 /* Past a label or a jump, we get to insns for which we can't count
3549 on whether or how many times they will be executed during each
3551 /* This code appears in three places, once in scan_loop, and twice
3552 in strength_reduce. */
3553 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3554 /* If we enter the loop in the middle, and scan around
3555 to the beginning, don't set not_every_iteration for that.
3556 This can be any kind of jump, since we want to know if insns
3557 will be executed if the loop is executed. */
3558 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3559 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3560 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3561 not_every_iteration = 1;
3563 /* At the virtual top of a converted loop, insns are again known to
3564 be executed each iteration: logically, the loop begins here
3565 even though the exit code has been duplicated. */
3567 else if (GET_CODE (p) == NOTE
3568 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP)
3569 not_every_iteration = 0;
3571 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3572 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3573 or not an insn is known to be executed each iteration of the
3574 loop, whether or not any iterations are known to occur.
3576 Therefore, if we have just passed a label and have no more labels
3577 between here and the test insn of the loop, we know these insns
3578 will be executed each iteration. */
3580 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3581 && no_labels_between_p (p, loop_end))
3582 not_every_iteration = 0;
3585 /* Try to calculate and save the number of loop iterations. This is
3586 set to zero if the actual number can not be calculated. This must
3587 be called after all giv's have been identified, since otherwise it may
3588 fail if the iteration variable is a giv. */
3590 loop_n_iterations = loop_iterations (loop_start, loop_end);
3592 /* Now for each giv for which we still don't know whether or not it is
3593 replaceable, check to see if it is replaceable because its final value
3594 can be calculated. This must be done after loop_iterations is called,
3595 so that final_giv_value will work correctly. */
3597 for (bl = loop_iv_list; bl; bl = bl->next)
3599 struct induction *v;
3601 for (v = bl->giv; v; v = v->next_iv)
3602 if (! v->replaceable && ! v->not_replaceable)
3603 check_final_value (v, loop_start, loop_end);
3606 /* Try to prove that the loop counter variable (if any) is always
3607 nonnegative; if so, record that fact with a REG_NONNEG note
3608 so that "decrement and branch until zero" insn can be used. */
3609 check_dbra_loop (loop_end, insn_count, loop_start);
3611 /* Create reg_map to hold substitutions for replaceable giv regs. */
3612 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3613 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3615 /* Examine each iv class for feasibility of strength reduction/induction
3616 variable elimination. */
3618 for (bl = loop_iv_list; bl; bl = bl->next)
3620 struct induction *v;
3623 rtx final_value = 0;
3625 /* Test whether it will be possible to eliminate this biv
3626 provided all givs are reduced. This is possible if either
3627 the reg is not used outside the loop, or we can compute
3628 what its final value will be.
3630 For architectures with a decrement_and_branch_until_zero insn,
3631 don't do this if we put a REG_NONNEG note on the endtest for
3632 this biv. */
3634 /* Compare against bl->init_insn rather than loop_start.
3635 We aren't concerned with any uses of the biv between
3636 init_insn and loop_start since these won't be affected
3637 by the value of the biv elsewhere in the function, so
3638 long as init_insn doesn't use the biv itself.
3639 March 14, 1989 -- self@bayes.arc.nasa.gov */
3641 if ((uid_luid[regno_last_uid[bl->regno]] < INSN_LUID (loop_end)
3643 && INSN_UID (bl->init_insn) < max_uid_for_loop
3644 && uid_luid[regno_first_uid[bl->regno]] >= INSN_LUID (bl->init_insn)
3645 #ifdef HAVE_decrement_and_branch_until_zero
3648 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3649 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3650 #ifdef HAVE_decrement_and_branch_until_zero
3654 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3655 threshold, insn_count);
3658 if (loop_dump_stream)
3660 fprintf (loop_dump_stream,
3661 "Cannot eliminate biv %d.\n",
3663 fprintf (loop_dump_stream,
3664 "First use: insn %d, last use: insn %d.\n",
3665 regno_first_uid[bl->regno],
3666 regno_last_uid[bl->regno]);
3670 /* Combine all giv's for this iv_class. */
3673 /* This will be true at the end, if all givs which depend on this
3674 biv have been strength reduced.
3675 We can't (currently) eliminate the biv unless this is so. */
3678 /* Check each giv in this class to see if we will benefit by reducing
3679 it. Skip giv's combined with others. */
3680 for (v = bl->giv; v; v = v->next_iv)
3682 struct induction *tv;
3684 if (v->ignore || v->same)
3687 benefit = v->benefit;
3689 /* Reduce benefit if not replaceable, since we will insert
3690 a move-insn to replace the insn that calculates this giv.
3691 Don't do this unless the giv is a user variable, since it
3692 will often be marked non-replaceable because of the duplication
3693 of the exit code outside the loop. In such a case, the copies
3694 we insert are dead and will be deleted. So they don't have
3695 a cost. Similar situations exist. */
3696 /* ??? The new final_[bg]iv_value code does a much better job
3697 of finding replaceable giv's, and hence this code may no longer
3698 be necessary. */
3699 if (! v->replaceable && ! bl->eliminable
3700 && REG_USERVAR_P (v->dest_reg))
3701 benefit -= copy_cost;
3703 /* Decrease the benefit to count the add-insns that we will
3704 insert to increment the reduced reg for the giv. */
3705 benefit -= add_cost * bl->biv_count;
3707 /* Decide whether to strength-reduce this giv or to leave the code
3708 unchanged (recompute it from the biv each time it is used).
3709 This decision can be made independently for each giv. */
3711 /* ??? Perhaps attempt to guess whether autoincrement will handle
3712 some of the new add insns; if so, can increase BENEFIT
3713 (undo the subtraction of add_cost that was done above). */
3715 /* If an insn is not to be strength reduced, then set its ignore
3716 flag, and clear all_reduced. */
3718 /* A giv that depends on a reversed biv must be reduced if it is
3719 used after the loop exit; otherwise, it would have the wrong
3720 value after the loop exit. To make it simple, just reduce all
3721 such giv's whether or not we know they are used after the loop
3722 exit. */
3724 if (v->lifetime * threshold * benefit < insn_count
3727 if (loop_dump_stream)
3728 fprintf (loop_dump_stream,
3729 "giv of insn %d not worth while, %d vs %d.\n",
3731 v->lifetime * threshold * benefit, insn_count);
3737 /* Check that we can increment the reduced giv without a
3738 multiply insn. If not, reject it. */
3740 for (tv = bl->biv; tv; tv = tv->next_iv)
3741 if (tv->mult_val == const1_rtx
3742 && ! product_cheap_p (tv->add_val, v->mult_val))
3744 if (loop_dump_stream)
3745 fprintf (loop_dump_stream,
3746 "giv of insn %d: would need a multiply.\n",
3747 INSN_UID (v->insn));
3755 /* Reduce each giv that we decided to reduce. */
3757 for (v = bl->giv; v; v = v->next_iv)
3759 struct induction *tv;
3760 if (! v->ignore && v->same == 0)
3762 v->new_reg = gen_reg_rtx (v->mode);
3764 /* For each place where the biv is incremented,
3765 add an insn to increment the new, reduced reg for the giv. */
3766 for (tv = bl->biv; tv; tv = tv->next_iv)
3768 if (tv->mult_val == const1_rtx)
3769 emit_iv_add_mult (tv->add_val, v->mult_val,
3770 v->new_reg, v->new_reg, tv->insn);
3771 else /* tv->mult_val == const0_rtx */
3772 /* A multiply is acceptable here
3773 since this is presumed to be seldom executed. */
3774 emit_iv_add_mult (tv->add_val, v->mult_val,
3775 v->add_val, v->new_reg, tv->insn);
3778 /* Add code at loop start to initialize giv's reduced reg. */
3780 emit_iv_add_mult (bl->initial_value, v->mult_val,
3781 v->add_val, v->new_reg, loop_start);
3785 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
3786 as not reduced.
3788 For each giv register that can be reduced now: if replaceable,
3789 substitute reduced reg wherever the old giv occurs;
3790 else add new move insn "giv_reg = reduced_reg".
3792 Also check for givs whose first use is their definition and whose
3793 last use is the definition of another giv. If so, it is likely
3794 dead and should not be used to eliminate a biv. */
3795 for (v = bl->giv; v; v = v->next_iv)
3797 if (v->same && v->same->ignore)
3803 if (v->giv_type == DEST_REG
3804 && regno_first_uid[REGNO (v->dest_reg)] == INSN_UID (v->insn))
3806 struct induction *v1;
3808 for (v1 = bl->giv; v1; v1 = v1->next_iv)
3809 if (regno_last_uid[REGNO (v->dest_reg)] == INSN_UID (v1->insn))
3813 /* Update expression if this was combined, in case other giv was
3814 replaced. */
3816 v->new_reg = replace_rtx (v->new_reg,
3817 v->same->dest_reg, v->same->new_reg);
3819 if (v->giv_type == DEST_ADDR)
3820 /* Store reduced reg as the address in the memref where we found
3821 this giv. */
3822 *v->location = v->new_reg;
3823 else if (v->replaceable)
3825 reg_map[REGNO (v->dest_reg)] = v->new_reg;
3828 /* I can no longer duplicate the original problem. Perhaps
3829 this is unnecessary now? */
3831 /* Replaceable; it isn't strictly necessary to delete the old
3832 insn and emit a new one, because v->dest_reg is now dead.
3834 However, especially when unrolling loops, the special
3835 handling for (set REG0 REG1) in the second cse pass may
3836 make v->dest_reg live again. To avoid this problem, emit
3837 an insn to set the original giv reg from the reduced giv.
3838 We cannot delete the original insn, since it may be part
3839 of a LIBCALL, and the code in flow that eliminates dead
3840 libcalls will fail if it is deleted. */
3841 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3847 /* Not replaceable; emit an insn to set the original giv reg from
3848 the reduced giv, same as above. */
3849 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3853 /* When a loop is reversed, givs which depend on the reversed
3854 biv, and which are live outside the loop, must be set to their
3855 correct final value. This insn is only needed if the giv is
3856 not replaceable. The correct final value is the same as the
3857 value that the giv starts the reversed loop with. */
3858 if (bl->reversed && ! v->replaceable)
3859 emit_iv_add_mult (bl->initial_value, v->mult_val,
3860 v->add_val, v->dest_reg, end_insert_before);
3861 else if (v->final_value)
3865 /* If the loop has multiple exits, emit the insn before the
3866 loop to ensure that it will always be executed no matter
3867 how the loop exits. Otherwise, emit the insn after the loop,
3868 since this is slightly more efficient. */
3869 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
3870 insert_before = loop_start;
3872 insert_before = end_insert_before;
3873 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
3877 /* If the insn to set the final value of the giv was emitted
3878 before the loop, then we must delete the insn inside the loop
3879 that sets it. If this is a LIBCALL, then we must delete
3880 every insn in the libcall. Note, however, that
3881 final_giv_value will only succeed when there are multiple
3882 exits if the giv is dead at each exit, hence it does not
3883 matter that the original insn remains because it is dead
3884 anyway. */
3885 /* Delete the insn inside the loop that sets the giv since
3886 the giv is now set before (or after) the loop. */
3887 delete_insn (v->insn);
3891 if (loop_dump_stream)
3893 fprintf (loop_dump_stream, "giv at %d reduced to ",
3894 INSN_UID (v->insn));
3895 print_rtl (loop_dump_stream, v->new_reg);
3896 fprintf (loop_dump_stream, "\n");
3900 /* All the givs based on the biv bl have been reduced if they
3901 merit it. */
3903 /* For each giv not marked as maybe dead that has been combined with a
3904 second giv, clear any "maybe dead" mark on that second giv.
3905 v->new_reg will either be or refer to the register of the giv it
3908 Doing this clearing avoids problems in biv elimination where a
3909 giv's new_reg is a complex value that can't be put in the insn but
3910 the giv combined with (with a reg as new_reg) is marked maybe_dead.
3911 Since the register will be used in either case, we'd prefer it be
3912 used from the simpler giv. */
3914 for (v = bl->giv; v; v = v->next_iv)
3915 if (! v->maybe_dead && v->same)
3916 v->same->maybe_dead = 0;
3918 /* Try to eliminate the biv, if it is a candidate.
3919 This won't work if ! all_reduced,
3920 since the givs we planned to use might not have been reduced.
3922 We have to be careful that we didn't initially think we could eliminate
3923 this biv because of a giv that we now think may be dead and shouldn't
3924 be used as a biv replacement.
3926 Also, there is the possibility that we may have a giv that looks
3927 like it can be used to eliminate a biv, but the resulting insn
3928 isn't valid. This can happen, for example, on the 88k, where a
3929 JUMP_INSN can compare a register only with zero. Attempts to
3930 replace it with a compare with a constant will fail.
3932 Note that in cases where this call fails, we may have replaced some
3933 of the occurrences of the biv with a giv, but no harm was done in
3934 doing so in the rare cases where it can occur. */
3936 if (all_reduced == 1 && bl->eliminable
3937 && maybe_eliminate_biv (bl, loop_start, end, 1,
3938 threshold, insn_count))
3941 /* ?? If we created a new test to bypass the loop entirely,
3942 or otherwise drop straight in, based on this test, then
3943 we might want to rewrite it also. This way some later
3944 pass has more hope of removing the initialization of this
3947 /* If final_value != 0, then the biv may be used after loop end
3948 and we must emit an insn to set it just in case.
3950 Reversed bivs already have an insn after the loop setting their
3951 value, so we don't need another one. We can't calculate the
3952 proper final value for such a biv here anyway. */
3953 if (final_value != 0 && ! bl->reversed)
3957 /* If the loop has multiple exits, emit the insn before the
3958 loop to ensure that it will always be executed no matter
3959 how the loop exits. Otherwise, emit the insn after the
3960 loop, since this is slightly more efficient. */
3961 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
3962 insert_before = loop_start;
3964 insert_before = end_insert_before;
3966 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
3970 #if 0
3971 /* Delete all of the instructions inside the loop which set
3972 the biv, as they are all dead. It is safe to delete them,
3973 because an insn setting a biv will never be part of a libcall. */
3974 /* However, deleting them will invalidate the regno_last_uid info,
3975 so keeping them around is more convenient. Final_biv_value
3976 will only succeed when there are multiple exits if the biv
3977 is dead at each exit, hence it does not matter that the original
3978 insn remains, because it is dead anyway. */
3979 for (v = bl->biv; v; v = v->next_iv)
3980 delete_insn (v->insn);
3981 #endif
3983 if (loop_dump_stream)
3984 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
3989 /* Go through all the instructions in the loop, making all the
3990 register substitutions scheduled in REG_MAP. */
3992 for (p = loop_start; p != end; p = NEXT_INSN (p))
3993 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3994 || GET_CODE (p) == CALL_INSN)
3996 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
3997 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4001 /* Unroll loops from within strength reduction so that we can use the
4002 induction variable information that strength_reduce has already
4005 if (flag_unroll_loops)
4006 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4008 if (loop_dump_stream)
4009 fprintf (loop_dump_stream, "\n");
4012 /* Return 1 if X is a valid source for an initial value (or as value being
4013 compared against in an initial test).
4015 X must be either a register or constant and must not be clobbered between
4016 the current insn and the start of the loop.
4018 INSN is the insn containing X. */
4021 valid_initial_value_p (x, insn, call_seen, loop_start)
4030 /* Only consider pseudos we know about initialized in insns whose luids
4031 we know. */
4032 if (GET_CODE (x) != REG
4033 || REGNO (x) >= max_reg_before_loop)
4036 /* Don't use call-clobbered registers across a call which clobbers it. On
4037 some machines, don't use any hard registers at all. */
4038 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4039 #ifndef SMALL_REGISTER_CLASSES
4040 && call_used_regs[REGNO (x)] && call_seen
4045 /* Don't use registers that have been clobbered before the start of the
4047 if (reg_set_between_p (x, insn, loop_start))
4053 /* Scan X for memory refs and check each memory address
4054 as a possible giv. INSN is the insn whose pattern X comes from.
4055 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4056 every loop iteration. */
4059 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4062 int not_every_iteration;
4063 rtx loop_start, loop_end;
4066 register enum rtx_code code;
4072 code = GET_CODE (x);
4096 benefit = general_induction_var (XEXP (x, 0),
4097 &src_reg, &add_val, &mult_val);
4099 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4100 Such a giv isn't useful. */
4101 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4103 /* Found one; record it. */
4105 = (struct induction *) oballoc (sizeof (struct induction));
4107 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4108 add_val, benefit, DEST_ADDR, not_every_iteration,
4109 &XEXP (x, 0), loop_start, loop_end);
4111 v->mem_mode = GET_MODE (x);
4117 /* Recursively scan the subexpressions for other mem refs. */
4119 fmt = GET_RTX_FORMAT (code);
4120 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4122 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4124 else if (fmt[i] == 'E')
4125 for (j = 0; j < XVECLEN (x, i); j++)
4126 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4127 loop_start, loop_end);
4130 /* Fill in the data about one biv update.
4131 V is the `struct induction' in which we record the biv. (It is
4132 allocated by the caller, with alloca.)
4133 INSN is the insn that sets it.
4134 DEST_REG is the biv's reg.
4136 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4137 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4138 being set to INC_VAL.
4140 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4141 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4142 can be executed more than once per iteration. If MAYBE_MULTIPLE
4143 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4144 executed exactly once per iteration. */
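/* A sketch of the two accepted forms, with assumed values: for the loop
   statement `i = i + 2', record_biv is called with MULT_VAL == const1_rtx
   and INC_VAL == 2; for an invariant assignment such as `i = n', it is
   called with MULT_VAL == const0_rtx and INC_VAL == n.  */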
4147 record_biv (v, insn, dest_reg, inc_val, mult_val,
4148 not_every_iteration, maybe_multiple)
4149 struct induction *v;
4154 int not_every_iteration;
4157 struct iv_class *bl;
4160 v->src_reg = dest_reg;
4161 v->dest_reg = dest_reg;
4162 v->mult_val = mult_val;
4163 v->add_val = inc_val;
4164 v->mode = GET_MODE (dest_reg);
4165 v->always_computable = ! not_every_iteration;
4166 v->maybe_multiple = maybe_multiple;
4168 /* Add this to the reg's iv_class, creating a class
4169 if this is the first incrementation of the reg. */
4171 bl = reg_biv_class[REGNO (dest_reg)];
4174 /* Create and initialize new iv_class. */
4176 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4178 bl->regno = REGNO (dest_reg);
4184 /* Set initial value to the reg itself. */
4185 bl->initial_value = dest_reg;
4186 /* We haven't seen the initializing insn yet */
4189 bl->initial_test = 0;
4190 bl->incremented = 0;
4194 bl->total_benefit = 0;
4196 /* Add this class to loop_iv_list. */
4197 bl->next = loop_iv_list;
4200 /* Put it in the array of biv register classes. */
4201 reg_biv_class[REGNO (dest_reg)] = bl;
4204 /* Update IV_CLASS entry for this biv. */
4205 v->next_iv = bl->biv;
4208 if (mult_val == const1_rtx)
4209 bl->incremented = 1;
4211 if (loop_dump_stream)
4213 fprintf (loop_dump_stream,
4214 "Insn %d: possible biv, reg %d,",
4215 INSN_UID (insn), REGNO (dest_reg));
4216 if (GET_CODE (inc_val) == CONST_INT)
4217 fprintf (loop_dump_stream, " const = %d\n",
4221 fprintf (loop_dump_stream, " const = ");
4222 print_rtl (loop_dump_stream, inc_val);
4223 fprintf (loop_dump_stream, "\n");
4228 /* Fill in the data about one giv.
4229 V is the `struct induction' in which we record the giv. (It is
4230 allocated by the caller, with alloca.)
4231 INSN is the insn that sets it.
4232 BENEFIT estimates the savings from deleting this insn.
4233 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4234 into a register or is used as a memory address.
4236 SRC_REG is the biv reg which the giv is computed from.
4237 DEST_REG is the giv's reg (if the giv is stored in a reg).
4238 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4239 LOCATION points to the place where this giv's value appears in INSN. */
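/* Hypothetical example: for the loop body statement `j = 4*i + k', where
   `i' is a biv and `k' is loop-invariant, record_giv would be called with
   SRC_REG == i, DEST_REG == j, MULT_VAL == 4, ADD_VAL == k, and
   TYPE == DEST_REG.  */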
4242 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4243 type, not_every_iteration, location, loop_start, loop_end)
4244 struct induction *v;
4248 rtx mult_val, add_val;
4251 int not_every_iteration;
4253 rtx loop_start, loop_end;
4255 struct induction *b;
4256 struct iv_class *bl;
4257 rtx set = single_set (insn);
4261 v->src_reg = src_reg;
4263 v->dest_reg = dest_reg;
4264 v->mult_val = mult_val;
4265 v->add_val = add_val;
4266 v->benefit = benefit;
4267 v->location = location;
4269 v->combined_with = 0;
4270 v->maybe_multiple = 0;
4272 v->derive_adjustment = 0;
4278 /* The v->always_computable field is used in update_giv_derive, to
4279 determine whether a giv can be used to derive another giv. For a
4280 DEST_REG giv, INSN computes a new value for the giv, so its value
4281 isn't computable if INSN isn't executed every iteration.
4282 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4283 it does not compute a new value. Hence the value is always computable
4284 regardless of whether INSN is executed each iteration. */
4286 if (type == DEST_ADDR)
4287 v->always_computable = 1;
4289 v->always_computable = ! not_every_iteration;
4291 if (type == DEST_ADDR)
4293 v->mode = GET_MODE (*location);
4297 else /* type == DEST_REG */
4299 v->mode = GET_MODE (SET_DEST (set));
4301 v->lifetime = (uid_luid[regno_last_uid[REGNO (dest_reg)]]
4302 - uid_luid[regno_first_uid[REGNO (dest_reg)]]);
4304 v->times_used = n_times_used[REGNO (dest_reg)];
4306 /* If the lifetime is zero, it means that this register is
4307 really a dead store. So mark this as a giv that can be
4308 ignored. This will not prevent the biv from being eliminated. */
4309 if (v->lifetime == 0)
4312 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4313 reg_iv_info[REGNO (dest_reg)] = v;
4316 /* Add the giv to the class of givs computed from one biv. */
4318 bl = reg_biv_class[REGNO (src_reg)];
4321 v->next_iv = bl->giv;
4323 /* Don't count DEST_ADDR. This is supposed to count the number of
4324 insns that calculate givs. */
4325 if (type == DEST_REG)
4327 bl->total_benefit += benefit;
4330 /* Fatal error, biv missing for this giv? */
4333 if (type == DEST_ADDR)
4337 /* The giv can be replaced outright by the reduced register only if all
4338 of the following conditions are true:
4339 - the insn that sets the giv is always executed on any iteration
4340 on which the giv is used at all
4341 (there are two ways to deduce this:
4342 either the insn is executed on every iteration,
4343 or all uses follow that insn in the same basic block),
4344 - the giv is not used outside the loop
4345 - no assignments to the biv occur during the giv's lifetime. */
4347 if (regno_first_uid[REGNO (dest_reg)] == INSN_UID (insn)
4348 /* Previous line always fails if INSN was moved by loop opt. */
4349 && uid_luid[regno_last_uid[REGNO (dest_reg)]] < INSN_LUID (loop_end)
4350 && (! not_every_iteration
4351 || last_use_this_basic_block (dest_reg, insn)))
4353 /* Now check that there are no assignments to the biv within the
4354 giv's lifetime. This requires two separate checks. */
4356 /* Check each biv update, and fail if any are between the first
4357 and last use of the giv.
4359 If this loop contains an inner loop that was unrolled, then
4360 the insn modifying the biv may have been emitted by the loop
4361 unrolling code, and hence does not have a valid luid. Just
4362 mark the giv as not replaceable in this case. Such a biv is not very
4363 useful anyway, because it is used in two different loops.
4364 It is very unlikely that we would be able to optimize the giv
4365 using this biv. */
4368 for (b = bl->biv; b; b = b->next_iv)
4370 if (INSN_UID (b->insn) >= max_uid_for_loop
4371 || ((uid_luid[INSN_UID (b->insn)]
4372 >= uid_luid[regno_first_uid[REGNO (dest_reg)]])
4373 && (uid_luid[INSN_UID (b->insn)]
4374 <= uid_luid[regno_last_uid[REGNO (dest_reg)]])))
4377 v->not_replaceable = 1;
4382 /* Check each insn between the first and last use of the giv,
4383 and fail if any of them are branches that jump to a named label
4384 outside this range, but still inside the loop. This catches
4385 cases of spaghetti code where the execution order of insns
4386 is not linear, and hence the above test fails. For example,
4387 in the following code, j is not replaceable:
4388 for (i = 0; i < 100; ) {
4389 L0: j = 4*i; goto L1;
4390 L2: k = j; goto L3;
4391 L1: i++; goto L2;
4392 L3: ; }
4393 printf ("k = %d\n", k); }
4394 This test is conservative, but it succeeds rarely enough
4395 that it isn't a problem. See also check_final_value below. */
4399 INSN_UID (p) >= max_uid_for_loop
4400 || INSN_LUID (p) < uid_luid[regno_last_uid[REGNO (dest_reg)]];
4403 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4404 && LABEL_NAME (JUMP_LABEL (p))
4405 && ((INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)
4406 && (INSN_LUID (JUMP_LABEL (p))
4407 < uid_luid[regno_first_uid[REGNO (dest_reg)]]))
4408 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)
4409 && (INSN_LUID (JUMP_LABEL (p))
4410 > uid_luid[regno_last_uid[REGNO (dest_reg)]]))))
4413 v->not_replaceable = 1;
4415 if (loop_dump_stream)
4416 fprintf (loop_dump_stream,
4417 "Found branch outside giv lifetime.\n");
4425 /* May still be replaceable; we don't have enough info here to
4426 decide. */
4428 v->not_replaceable = 0;
4432 if (loop_dump_stream)
4434 if (type == DEST_REG)
4435 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4436 INSN_UID (insn), REGNO (dest_reg));
4438 fprintf (loop_dump_stream, "Insn %d: dest address",
4441 fprintf (loop_dump_stream, " src reg %d benefit %d",
4442 REGNO (src_reg), v->benefit);
4443 fprintf (loop_dump_stream, " used %d lifetime %d",
4444 v->times_used, v->lifetime);
4447 fprintf (loop_dump_stream, " replaceable");
4449 if (GET_CODE (mult_val) == CONST_INT)
4450 fprintf (loop_dump_stream, " mult %d",
4454 fprintf (loop_dump_stream, " mult ");
4455 print_rtl (loop_dump_stream, mult_val);
4458 if (GET_CODE (add_val) == CONST_INT)
4459 fprintf (loop_dump_stream, " add %d",
4463 fprintf (loop_dump_stream, " add ");
4464 print_rtl (loop_dump_stream, add_val);
4468 if (loop_dump_stream)
4469 fprintf (loop_dump_stream, "\n");
4474 /* All this does is determine whether a giv can be made replaceable because
4475 its final value can be calculated. This code cannot be part of record_giv
4476 above, because final_giv_value requires that the number of loop iterations
4477 be known, and that cannot be accurately calculated until after all givs
4478 have been identified. */
4481 check_final_value (v, loop_start, loop_end)
4482 struct induction *v;
4483 rtx loop_start, loop_end;
4485 struct iv_class *bl;
4486 rtx final_value = 0;
4489 bl = reg_biv_class[REGNO (v->src_reg)];
4491 /* DEST_ADDR givs will never reach here, because they are always marked
4492 replaceable above in record_giv. */
4494 /* The giv can be replaced outright by the reduced register only if all
4495 of the following conditions are true:
4496 - the insn that sets the giv is always executed on any iteration
4497 on which the giv is used at all
4498 (there are two ways to deduce this:
4499 either the insn is executed on every iteration,
4500 or all uses follow that insn in the same basic block),
4501 - its final value can be calculated (this condition is different
4502 than the one above in record_giv)
4503 - no assignments to the biv occur during the giv's lifetime. */
4506 /* This is only called now when replaceable is known to be false. */
4507 /* Clear replaceable, so that it won't confuse final_giv_value. */
4511 if ((final_value = final_giv_value (v, loop_start, loop_end))
4512 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4514 int biv_increment_seen = 0;
4520 /* When trying to determine whether or not a biv increment occurs
4521 during the lifetime of the giv, we can ignore uses of the variable
4522 outside the loop because final_value is true. Hence we cannot
4523 use regno_last_uid and regno_first_uid as above in record_giv. */
4525 /* Search the loop to determine whether any assignments to the
4526 biv occur during the giv's lifetime. Start with the insn
4527 that sets the giv, and search around the loop until we come
4528 back to that insn again.
4530 Also fail if there is a jump within the giv's lifetime that jumps
4531 to somewhere outside the lifetime but still within the loop. This
4532 catches spaghetti code where the execution order is not linear, and
4533 hence the above test fails. Here we assume that the giv lifetime
4534 does not extend from one iteration of the loop to the next, so as
4535 to make the test easier. Since the lifetime isn't known yet,
4536 this requires two loops. See also record_giv above. */
4538 last_giv_use = v->insn;
4544 p = NEXT_INSN (loop_start);
4548 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4549 || GET_CODE (p) == CALL_INSN)
4551 if (biv_increment_seen)
4553 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4556 v->not_replaceable = 1;
4560 else if (GET_CODE (PATTERN (p)) == SET
4561 && SET_DEST (PATTERN (p)) == v->src_reg)
4562 biv_increment_seen = 1;
4563 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4568 /* Now that the lifetime of the giv is known, check for branches
4569 from within the lifetime to outside the lifetime if it is still
4570 replaceable. */
4579 p = NEXT_INSN (loop_start);
4580 if (p == last_giv_use)
4583 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4584 && LABEL_NAME (JUMP_LABEL (p))
4585 && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4586 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4587 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4588 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4591 v->not_replaceable = 1;
4593 if (loop_dump_stream)
4594 fprintf (loop_dump_stream,
4595 "Found branch outside giv lifetime.\n");
4602 /* If it is replaceable, then save the final value. */
4604 v->final_value = final_value;
4607 if (loop_dump_stream && v->replaceable)
4608 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
4609 INSN_UID (v->insn), REGNO (v->dest_reg));
4612 /* Update the status of whether a giv can derive other givs.
4614 We need to do something special if there is or may be an update to the biv
4615 between the time the giv is defined and the time it is used to derive
4616 another giv.
4618 In addition, a giv that is only conditionally set is not allowed to
4619 derive another giv once a label has been passed.
4621 The cases we look at are when a label or an update to a biv is passed. */
4624 update_giv_derive (p)
4627 struct iv_class *bl;
4628 struct induction *biv, *giv;
4632 /* Search all IV classes, then all bivs, and finally all givs.
4634 There are three cases we are concerned with. First we have the situation
4635 of a giv that is only updated conditionally. In that case, it may not
4636 derive any givs after a label is passed.
4638 The second case is when a biv update occurs, or may occur, after the
4639 definition of a giv. For certain biv updates (see below) that are
4640 known to occur between the giv definition and use, we can adjust the
4641 giv definition. For others, or when the biv update is conditional,
4642 we must prevent the giv from deriving any other givs. There are two
4643 sub-cases within this case.
4645 If this is a label, we are concerned with any biv update that is done
4646 conditionally, since it may be done after the giv is defined followed by
4647 a branch here (actually, we need to pass both a jump and a label, but
4648 this extra tracking doesn't seem worth it).
4650 If this is a jump, we are concerned about any biv update that may be
4651 executed multiple times. We are actually only concerned about
4652 backward jumps, but it is probably not worth performing the test
4653 on the jump again here.
4655 If this is a biv update, we must adjust the giv status to show that a
4656 subsequent biv update was performed. If this adjustment cannot be done,
4657 the giv cannot derive further givs. */
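/* As an assumed example of the biv-update case: if the loop body contains

	j = 3*i;		(giv j defined from biv i)
	i = i + 1;		(biv update)

   then after the update, j holds 3*(i - 1), i.e. 3*i - 3 in terms of the
   new i.  The product biv->add_val * giv->mult_val (here 1 * 3 = 3) is
   accumulated into giv->derive_adjustment so that any giv later derived
   from j can be compensated.  */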
4659 for (bl = loop_iv_list; bl; bl = bl->next)
4660 for (biv = bl->biv; biv; biv = biv->next_iv)
4661 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
4664 for (giv = bl->giv; giv; giv = giv->next_iv)
4666 /* If cant_derive is already true, there is no point in
4667 checking all of these conditions again. */
4668 if (giv->cant_derive)
4671 /* If this giv is conditionally set and we have passed a label,
4672 it cannot derive anything. */
4673 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
4674 giv->cant_derive = 1;
4676 /* Skip givs that have mult_val == 0, since
4677 they are really invariants. Also skip those that are
4678 replaceable, since we know their lifetime doesn't contain
4679 any biv update. */
4680 else if (giv->mult_val == const0_rtx || giv->replaceable)
4683 /* The only way we can allow this giv to derive another
4684 is if this is a biv increment and we can form the product
4685 of biv->add_val and giv->mult_val. In this case, we will
4686 be able to compute a compensation. */
4687 else if (biv->insn == p)
4691 if (biv->mult_val == const1_rtx)
4692 tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
4697 if (tem && giv->derive_adjustment)
4698 tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
4699 giv->derive_adjustment),
4702 giv->derive_adjustment = tem;
4704 giv->cant_derive = 1;
4706 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
4707 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
4708 giv->cant_derive = 1;
4713 /* Check whether an insn is an increment legitimate for a basic induction var.
4714 X is the source of insn P, or a part of it.
4715 MODE is the mode in which X should be interpreted.
4717 DEST_REG is the putative biv, also the destination of the insn.
4718 We accept patterns of these forms:
4719 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
4720 REG = INVARIANT + REG
4722 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
4723 and store the additive term into *INC_VAL.
4725 If X is an assignment of an invariant into DEST_REG, we set
4726 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
4728 We also want to detect a BIV when it corresponds to a variable
4729 whose mode was promoted via PROMOTED_MODE. In that case, an increment
4730 of the variable may be a PLUS that adds a SUBREG of that variable to
4731 an invariant and then sign- or zero-extends the result of the PLUS
4732 into the variable.
4734 Most GIVs in such cases will be in the promoted mode, since that is
4735 probably the natural computation mode (and almost certainly the mode
4736 used for addresses) on the machine. So we view the pseudo-reg containing
4737 the variable as the BIV, as if it were simply incremented.
4739 Note that treating the entire pseudo as a BIV will result in making
4740 simple increments to any GIVs based on it. However, if the variable
4741 overflows in its declared mode but not its promoted mode, the result will
4742 be incorrect. This is acceptable if the variable is signed, since
4743 overflows in such cases are undefined, but not if it is unsigned, since
4744 those overflows are defined. So we only check for SIGN_EXTEND and
4745 not ZERO_EXTEND.
4747 If we cannot find a biv, we return 0. */
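/* Hypothetical instances of the accepted patterns, for DEST_REG == i:
   a source of the form (plus:SI (reg i) (const_int 4)) yields
   *MULT_VAL == const1_rtx and *INC_VAL == (const_int 4), while an
   invariant source such as (reg n) yields *MULT_VAL == const0_rtx and
   *INC_VAL == (reg n) (the latter only in the innermost loop; see
   below).  */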
4750 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
4752 enum machine_mode mode;
4758 register enum rtx_code code;
4762 code = GET_CODE (x);
4766 if (XEXP (x, 0) == dest_reg
4767 || (GET_CODE (XEXP (x, 0)) == SUBREG
4768 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
4769 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
4771 else if (XEXP (x, 1) == dest_reg
4772 || (GET_CODE (XEXP (x, 1)) == SUBREG
4773 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
4774 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
4779 if (invariant_p (arg) != 1)
4782 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
4783 *mult_val = const1_rtx;
4787 /* If this is a SUBREG for a promoted variable, check the inner
4789 if (SUBREG_PROMOTED_VAR_P (x))
4790 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
4791 dest_reg, p, inc_val, mult_val);
4794 /* If this register is assigned in the previous insn, look at its
4795 source, but don't go outside the loop or past a label. */
4797 for (insn = PREV_INSN (p);
4798 (insn && GET_CODE (insn) == NOTE
4799 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4800 insn = PREV_INSN (insn))
4804 set = single_set (insn);
4806 if (set != 0 && SET_DEST (set) == x)
4807 return basic_induction_var (SET_SRC (set),
4808 (GET_MODE (SET_SRC (set)) == VOIDmode
4810 : GET_MODE (SET_SRC (set))),
4813 /* ... fall through ... */
4815 /* Can accept constant setting of biv only when inside the innermost loop.
4816 Otherwise, a biv of an inner loop may be incorrectly recognized
4817 as a biv of the outer loop,
4818 causing code to be moved INTO the inner loop. */
4820 if (invariant_p (x) != 1)
4825 if (loops_enclosed == 1)
4827 /* Possible bug here? Perhaps we don't know the mode of X. */
4828 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
4829 *mult_val = const0_rtx;
4836 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
4837 dest_reg, p, inc_val, mult_val);
4839 /* Similar, since this can be a sign extension. */
4840 for (insn = PREV_INSN (p);
4841 (insn && GET_CODE (insn) == NOTE
4842 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4843 insn = PREV_INSN (insn))
4847 set = single_set (insn);
4849 if (set && SET_DEST (set) == XEXP (x, 0)
4850 && GET_CODE (XEXP (x, 1)) == CONST_INT
4851 && INTVAL (XEXP (x, 1)) >= 0
4852 && GET_CODE (SET_SRC (set)) == ASHIFT
4853 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
4854 return basic_induction_var (XEXP (SET_SRC (set), 0),
4855 GET_MODE (XEXP (x, 0)),
4856 dest_reg, insn, inc_val, mult_val);
4864 /* A general induction variable (giv) is any quantity that is a linear
4865 function of a basic induction variable,
4866 i.e. giv = biv * mult_val + add_val.
4867 The coefficients can be any loop invariant quantity.
4868 A giv need not be computed directly from the biv;
4869 it can be computed by way of other givs. */
4871 /* Determine whether X computes a giv.
4872 If it does, return a nonzero value
4873 which is the benefit from eliminating the computation of X;
4874 set *SRC_REG to the register of the biv that it is computed from;
4875 set *ADD_VAL and *MULT_VAL to the coefficients,
4876 such that the value of X is biv * mult + add. */
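/* A sketch with assumed values: for X == (plus:SI (mult:SI (reg i)
   (const_int 4)) (const_int 10)), where i is a biv, this returns a
   positive benefit and sets *SRC_REG to i, *MULT_VAL to (const_int 4)
   and *ADD_VAL to (const_int 10).  */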
4879 general_induction_var (x, src_reg, add_val, mult_val)
4889 /* If this is an invariant, forget it, it isn't a giv. */
4890 if (invariant_p (x) == 1)
4893 /* See if the expression could be a giv and get its form.
4894 Mark our place on the obstack in case we don't find a giv. */
4895 storage = (char *) oballoc (0);
4896 x = simplify_giv_expr (x, &benefit);
4903 switch (GET_CODE (x))
4907 /* Since this is now an invariant and wasn't before, it must be a giv
4908 with MULT_VAL == 0. It doesn't matter which BIV we associate this
4909 value with. */
4910 *src_reg = loop_iv_list->biv->dest_reg;
4911 *mult_val = const0_rtx;
4916 /* This is equivalent to a BIV. */
4918 *mult_val = const1_rtx;
4919 *add_val = const0_rtx;
4923 /* Either (plus (biv) (invar)) or
4924 (plus (mult (biv) (invar_1)) (invar_2)). */
4925 if (GET_CODE (XEXP (x, 0)) == MULT)
4927 *src_reg = XEXP (XEXP (x, 0), 0);
4928 *mult_val = XEXP (XEXP (x, 0), 1);
4932 *src_reg = XEXP (x, 0);
4933 *mult_val = const1_rtx;
4935 *add_val = XEXP (x, 1);
4939 /* ADD_VAL is zero. */
4940 *src_reg = XEXP (x, 0);
4941 *mult_val = XEXP (x, 1);
4942 *add_val = const0_rtx;
4949 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
4950 one unless they are CONST_INT). */
4951 if (GET_CODE (*add_val) == USE)
4952 *add_val = XEXP (*add_val, 0);
4953 if (GET_CODE (*mult_val) == USE)
4954 *mult_val = XEXP (*mult_val, 0);
4956 benefit += rtx_cost (orig_x, SET);
4958 /* Always return some benefit if this is a giv so it will be detected
4959 as such. This allows elimination of bivs that might otherwise
4960 not be eliminated. */
4961 return benefit == 0 ? 1 : benefit;
4964 /* Given an expression, X, try to form it as a linear function of a biv.
4965 We will canonicalize it to be of the form
4966 (plus (mult (BIV) (invar_1))
4968 with possible degeneracies.
4970 The invariant expressions must each be of a form that can be used as a
4971 machine operand. We surround them with a USE rtx (a hack, but localized
4972 and certainly unambiguous!) if not a CONST_INT for simplicity in this
4973 routine; it is the caller's responsibility to strip them.
4975 If no such canonicalization is possible (i.e., two biv's are used or an
4976 expression that is neither invariant nor a biv or giv), this routine
4977 returns 0.
4979 For a non-zero return, the result will have a code of CONST_INT, USE,
4980 REG (for a BIV), PLUS, or MULT. No other codes will occur.
4982 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
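/* An assumed example of the canonicalization: with biv i, the expression
   (i - 2) * 3 is first rewritten as (plus i (mult (const_int 2)
   (const_int -1))), then the multiplication by 3 is distributed and the
   constants folded, giving (plus (mult i (const_int 3)) (const_int -6)),
   which matches the canonical form above.  */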
4985 simplify_giv_expr (x, benefit)
4989 enum machine_mode mode = GET_MODE (x);
4993 /* If this is not an integer mode, or if we cannot do arithmetic in this
4994 mode, this can't be a giv. */
4995 if (mode != VOIDmode
4996 && (GET_MODE_CLASS (mode) != MODE_INT
4997 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5000 switch (GET_CODE (x))
5003 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5004 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5005 if (arg0 == 0 || arg1 == 0)
5008 /* Put constant last, CONST_INT last if both constant. */
5009 if ((GET_CODE (arg0) == USE
5010 || GET_CODE (arg0) == CONST_INT)
5011 && GET_CODE (arg1) != CONST_INT)
5012 tem = arg0, arg0 = arg1, arg1 = tem;
5014 /* Handle addition of zero, then addition of an invariant. */
5015 if (arg1 == const0_rtx)
5017 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5018 switch (GET_CODE (arg0))
5022 /* Both invariant. Only valid if sum is machine operand.
5023 First strip off possible USE on first operand. */
5024 if (GET_CODE (arg0) == USE)
5025 arg0 = XEXP (arg0, 0);
5028 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5030 tem = plus_constant (arg0, INTVAL (arg1));
5031 if (GET_CODE (tem) != CONST_INT)
5032 tem = gen_rtx (USE, mode, tem);
5039 /* biv + invar or mult + invar. Return sum. */
5040 return gen_rtx (PLUS, mode, arg0, arg1);
5043 /* (a + invar_1) + invar_2. Associate. */
5044 return simplify_giv_expr (gen_rtx (PLUS, mode,
5046 gen_rtx (PLUS, mode,
5047 XEXP (arg0, 1), arg1)),
5054 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5055 MULT to reduce cases. */
5056 if (GET_CODE (arg0) == REG)
5057 arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
5058 if (GET_CODE (arg1) == REG)
5059 arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);
5061 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5062 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5063 Recurse to associate the second PLUS. */
5064 if (GET_CODE (arg1) == MULT)
5065 tem = arg0, arg0 = arg1, arg1 = tem;
5067 if (GET_CODE (arg1) == PLUS)
5068 return simplify_giv_expr (gen_rtx (PLUS, mode,
5069 gen_rtx (PLUS, mode,
5070 arg0, XEXP (arg1, 0)),
5074 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5075 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5078 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5081 return simplify_giv_expr (gen_rtx (MULT, mode,
5083 gen_rtx (PLUS, mode,
5089 /* Handle "a - b" as "a + b * (-1)". */
5090 return simplify_giv_expr (gen_rtx (PLUS, mode,
5092 gen_rtx (MULT, mode,
5093 XEXP (x, 1), constm1_rtx)),
5097 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5098 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5099 if (arg0 == 0 || arg1 == 0)
5102 /* Put constant last, CONST_INT last if both constant. */
5103 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5104 && GET_CODE (arg1) != CONST_INT)
5105 tem = arg0, arg0 = arg1, arg1 = tem;
5107 /* If second argument is not now constant, not giv. */
5108 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5111 /* Handle multiply by 0 or 1. */
5112 if (arg1 == const0_rtx)
5115 else if (arg1 == const1_rtx)
5118 switch (GET_CODE (arg0))
5121 /* biv * invar. Done. */
5122 return gen_rtx (MULT, mode, arg0, arg1);
5125 /* Product of two constants. */
5126 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5129 /* invar * invar. Not giv. */
5133 /* (a * invar_1) * invar_2. Associate. */
5134 return simplify_giv_expr (gen_rtx (MULT, mode,
5136 gen_rtx (MULT, mode,
5137 XEXP (arg0, 1), arg1)),
5141 /* (a + invar_1) * invar_2. Distribute. */
5142 return simplify_giv_expr (gen_rtx (PLUS, mode,
5143 gen_rtx (MULT, mode,
5144 XEXP (arg0, 0), arg1),
5145 gen_rtx (MULT, mode,
5146 XEXP (arg0, 1), arg1)),
5155 /* Shift by constant is multiply by power of two. */
5156 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5159 return simplify_giv_expr (gen_rtx (MULT, mode,
5161 GEN_INT ((HOST_WIDE_INT) 1
5162 << INTVAL (XEXP (x, 1)))),
5166 /* "-a" is "a * (-1)" */
5167 return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
5171 /* "~a" is "-a - 1". Silly, but easy. */
5172 return simplify_giv_expr (gen_rtx (MINUS, mode,
5173 gen_rtx (NEG, mode, XEXP (x, 0)),
5178 /* Already in proper form for invariant. */
5182 /* If this is a new register, we can't deal with it. */
5183 if (REGNO (x) >= max_reg_before_loop)
5186 /* Check for biv or giv. */
5187 switch (reg_iv_type[REGNO (x)])
5191 case GENERAL_INDUCT:
5193 struct induction *v = reg_iv_info[REGNO (x)];
5195 /* Form expression from giv and add benefit. Ensure this giv
5196 can derive another and subtract any needed adjustment if so. */
5197 *benefit += v->benefit;
5201 tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
5202 v->src_reg, v->mult_val),
5204 if (v->derive_adjustment)
5205 tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
5206 return simplify_giv_expr (tem, benefit);
5210 /* Fall through to general case. */
5212 /* If invariant, return as USE (unless CONST_INT).
5213 Otherwise, not giv. */
5214 if (GET_CODE (x) == USE)
5217 if (invariant_p (x) == 1)
5219 if (GET_CODE (x) == CONST_INT)
5222 return gen_rtx (USE, mode, x);
5229 /* Help detect a giv that is calculated by several consecutive insns;
5230 for example,
5231 giv = biv * M
5232 giv = giv + A
5233 The caller has already identified the first insn P as having a giv as dest;
5234 we check that all other insns that set the same register follow
5235 immediately after P, that they alter nothing else,
5236 and that the result of the last is still a giv.
5238 The value is 0 if the reg set in P is not really a giv.
5239 Otherwise, the value is the amount gained by eliminating
5240 all the consecutive insns that compute the value.
5242 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5243 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5245 The coefficients of the ultimate giv value are stored in
5246 *MULT_VAL and *ADD_VAL. */
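/* For illustration (hypothetical insns): the consecutive pair

	r = i << 2;
	r = r + base;

   computes the single giv r = 4*i + base.  The first insn makes r a
   candidate; this function then verifies that the immediately following
   set of r still leaves it a giv and returns the combined benefit, with
   *MULT_VAL == 4 and *ADD_VAL == base.  */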
5249 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5264 /* Indicate that this is a giv so that we can update the value produced in
5265 each insn of the multi-insn sequence.
5267 This induction structure will be used only by the call to
5268 general_induction_var below, so we can allocate it on our stack.
5269 If this is a giv, our caller will replace the induct var entry with
5270 a new induction structure. */
5272 = (struct induction *) alloca (sizeof (struct induction));
5273 v->src_reg = src_reg;
5274 v->mult_val = *mult_val;
5275 v->add_val = *add_val;
5276 v->benefit = first_benefit;
5278 v->derive_adjustment = 0;
5280 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5281 reg_iv_info[REGNO (dest_reg)] = v;
5283 count = n_times_set[REGNO (dest_reg)] - 1;
5288 code = GET_CODE (p);
5290 /* If libcall, skip to end of call sequence. */
5291 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5295 && (set = single_set (p))
5296 && GET_CODE (SET_DEST (set)) == REG
5297 && SET_DEST (set) == dest_reg
5298 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5300 /* Giv created by equivalent expression. */
5301 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5302 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5303 add_val, mult_val))))
5304 && src_reg == v->src_reg)
5306 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5307 benefit += libcall_benefit (p);
5310 v->mult_val = *mult_val;
5311 v->add_val = *add_val;
5312 v->benefit = benefit;
5314 else if (code != NOTE)
5316 /* Allow insns that set something other than this giv to a
5317 constant. Such insns are needed on machines which cannot
5318 include long constants and should not disqualify a giv. */
5320 && (set = single_set (p))
5321 && SET_DEST (set) != dest_reg
5322 && CONSTANT_P (SET_SRC (set)))
5325 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5333 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5334 represented by G1. If no such expression can be found, or it is clear that
5335 it cannot possibly be a valid address, 0 is returned.
5337 To perform the computation, we note that
5338 G1 = a * v + b and
5339 G2 = c * v + d
5340 where `v' is the biv.
5342 So G2 = (c/a) * G1 + (d - b*c/a) */
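/* A worked instance with assumed coefficients: if G1 = 4*v + 2 and
   G2 = 8*v + 10, then c/a = 2 and d - b*c/a = 10 - 2*2 = 6, so
   G2 = 2*G1 + 6, which is built below as (plus (mult G1 2) 6).  */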
5346 express_from (g1, g2)
5347 struct induction *g1, *g2;
5351 /* The value that G1 will be multiplied by must be a constant integer. Also,
5352 the only chance we have of getting a valid address is if b*c/a (see above
5353 for notation) is also an integer. */
5354 if (GET_CODE (g1->mult_val) != CONST_INT
5355 || GET_CODE (g2->mult_val) != CONST_INT
5356 || GET_CODE (g1->add_val) != CONST_INT
5357 || g1->mult_val == const0_rtx
5358 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5361 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5362 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5364 /* Form simplified final result. */
5365 if (mult == const0_rtx)
5367 else if (mult == const1_rtx)
5368 mult = g1->dest_reg;
5370 mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);
5372 if (add == const0_rtx)
5375 return gen_rtx (PLUS, g2->mode, mult, add);
5379 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5380 (either directly or via an address expression) a register used to represent
5381 G1. Set g2->new_reg to a representation of G1 (normally just
5382 g1->dest_reg). */
5385 combine_givs_p (g1, g2)
5386 struct induction *g1, *g2;
5390 /* If these givs are identical, they can be combined. */
5391 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5392 && rtx_equal_p (g1->add_val, g2->add_val))
5394 g2->new_reg = g1->dest_reg;
5399 /* If G2 can be expressed as a function of G1 and that function is valid
5400 as an address and no more expensive than using a register for G2,
5401 the expression of G2 in terms of G1 can be used. */
5402 if (g2->giv_type == DEST_ADDR
5403 && (tem = express_from (g1, g2)) != 0
5404 && memory_address_p (g2->mem_mode, tem)
5405 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5415 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5416 any other. If so, point SAME to the giv combined with and set NEW_REG to
5417 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5418 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5421 combine_givs (bl)
5422 struct iv_class *bl;
5424 struct induction *g1, *g2;
5427 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5428 for (pass = 0; pass <= 1; pass++)
5429 for (g2 = bl->giv; g2; g2 = g2->next_iv)
5431 /* First try to combine with replaceable givs, then all givs. */
5432 && (g1->replaceable || pass == 1)
5433 /* If either has already been combined or is to be ignored, can't
5434 combine. */
5435 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5436 /* If something has been based on G2, G2 cannot itself be based
5437 on something else. */
5438 && ! g2->combined_with
5439 && combine_givs_p (g1, g2))
5441 /* g2->new_reg set by `combine_givs_p' */
5443 g1->combined_with = 1;
5444 g1->benefit += g2->benefit;
5445 /* ??? The new final_[bg]iv_value code does a much better job
5446 of finding replaceable giv's, and hence this code may no
5447 longer be necessary. */
5448 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5449 g1->benefit -= copy_cost;
5450 g1->lifetime += g2->lifetime;
5451 g1->times_used += g2->times_used;
5453 if (loop_dump_stream)
5454 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5455 INSN_UID (g2->insn), INSN_UID (g1->insn));
5459 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5462 emit_iv_add_mult (b, m, a, reg, insert_before)
5463 rtx b; /* initial value of basic induction variable */
5464 rtx m; /* multiplicative constant */
5465 rtx a; /* additive constant */
5466 rtx reg; /* destination register */
5472 /* Prevent unexpected sharing of these rtx. */
5476 /* Increase the lifetime of any invariants moved further in code. */
5477 update_reg_last_use (a, insert_before);
5478 update_reg_last_use (b, insert_before);
5479 update_reg_last_use (m, insert_before);
5482 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5484 emit_move_insn (reg, result);
5485 seq = gen_sequence ();
5488 emit_insn_before (seq, insert_before);
5491 /* Test whether A * B can be computed without
5492 an actual multiply insn. Value is 1 if so. */
5495 product_cheap_p (a, b)
5501 struct obstack *old_rtl_obstack = rtl_obstack;
5502 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5505 /* If only one is constant, make it B. */
5506 if (GET_CODE (a) == CONST_INT)
5507 tmp = a, a = b, b = tmp;
5509 /* If first constant, both constant, so don't need multiply. */
5510 if (GET_CODE (a) == CONST_INT)
5513 /* If second not constant, neither is constant, so would need multiply. */
5514 if (GET_CODE (b) != CONST_INT)
5517 /* One operand is constant, so might not need multiply insn. Generate the
5518 code for the multiply and see if a call or multiply, or long sequence
5519 of insns is generated. */
5521 rtl_obstack = &temp_obstack;
5523 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5524 tmp = gen_sequence ();
5527 if (GET_CODE (tmp) == SEQUENCE)
5529 if (XVEC (tmp, 0) == 0)
5531 else if (XVECLEN (tmp, 0) > 3)
5534 for (i = 0; i < XVECLEN (tmp, 0); i++)
5536 rtx insn = XVECEXP (tmp, 0, i);
5538 if (GET_CODE (insn) != INSN
5539 || (GET_CODE (PATTERN (insn)) == SET
5540 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
5541 || (GET_CODE (PATTERN (insn)) == PARALLEL
5542 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
5543 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
5550 else if (GET_CODE (tmp) == SET
5551 && GET_CODE (SET_SRC (tmp)) == MULT)
5553 else if (GET_CODE (tmp) == PARALLEL
5554 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
5555 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
5558 /* Free any storage we obtained in generating this multiply and restore rtl
5559 allocation to its normal obstack. */
5560 obstack_free (&temp_obstack, storage);
5561 rtl_obstack = old_rtl_obstack;
5566 /* Check to see if loop can be terminated by a "decrement and branch until
5567 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
5568 Also try reversing an increment loop to a decrement loop
5569 to see if the optimization can be performed.
5570 Value is nonzero if optimization was performed. */
5572 /* This is useful even if the architecture doesn't have such an insn,
5573 because it might change a loop which increments from 0 to n to a loop
5574 which decrements from n to 0. A loop that decrements to zero is usually
5575 faster than one that increments from zero. */
5577 /* ??? This could be rewritten to use some of the loop unrolling procedures,
5578 such as approx_final_value, biv_total_increment, loop_iterations, and
5579 final_[bg]iv_value. */
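/* A source-level sketch of the reversal (assumed example): a loop like

	for (i = 0; i < 100; i++)
	  body;

   in which i is used only to count iterations can be rewritten to count
   downward,

	for (i = 100; i > 0; i--)
	  body;

   so that the exit test compares against zero and a decrement-and-branch
   instruction (or a branch carrying a REG_NONNEG note) can be used.  */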
5582 check_dbra_loop (loop_end, insn_count, loop_start)
5587 struct iv_class *bl;
5592 enum rtx_code branch_code;
5595 rtx before_comparison;
5598 /* If last insn is a conditional branch, and the insn before tests a
5599 register value, try to optimize it. Otherwise, we can't do anything. */
5601 comparison = get_condition_for_loop (PREV_INSN (loop_end));
5602 if (comparison == 0)
5605 /* Check all of the bivs to see if the compare uses one of them.
5606 Skip biv's set more than once because we can't guarantee that
5607 it will be zero on the last iteration. Also skip if the biv is
5608 used between its update and the test insn. */
5610 for (bl = loop_iv_list; bl; bl = bl->next)
5612 if (bl->biv_count == 1
5613 && bl->biv->dest_reg == XEXP (comparison, 0)
5614 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
5615 PREV_INSN (PREV_INSN (loop_end))))
5622 /* Look for the case where the basic induction variable is always
5623 nonnegative, and equals zero on the last iteration.
5624 In this case, add a reg_note REG_NONNEG, which allows the
5625 m68k DBRA instruction to be used. */
5627 if (((GET_CODE (comparison) == GT
5628 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
5629 && INTVAL (XEXP (comparison, 1)) == -1)
5630 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
5631 && GET_CODE (bl->biv->add_val) == CONST_INT
5632 && INTVAL (bl->biv->add_val) < 0)
5634 /* Initial value must be greater than 0,
5635 init_val % -dec_value == 0 to ensure that it equals zero on
5636 the last iteration */
5638 if (GET_CODE (bl->initial_value) == CONST_INT
5639 && INTVAL (bl->initial_value) > 0
5640 && (INTVAL (bl->initial_value) %
5641 (-INTVAL (bl->biv->add_val))) == 0)
5643 /* register always nonnegative, add REG_NOTE to branch */
5644 REG_NOTES (PREV_INSN (loop_end))
5645 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5646 REG_NOTES (PREV_INSN (loop_end)));
5652 /* If the decrement is 1 and the value was tested as >= 0 before
5653 the loop, then we can safely optimize. */
5654 for (p = loop_start; p; p = PREV_INSN (p))
5656 if (GET_CODE (p) == CODE_LABEL)
5658 if (GET_CODE (p) != JUMP_INSN)
5661 before_comparison = get_condition_for_loop (p);
5662 if (before_comparison
5663 && XEXP (before_comparison, 0) == bl->biv->dest_reg
5664 && GET_CODE (before_comparison) == LT
5665 && XEXP (before_comparison, 1) == const0_rtx
5666 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
5667 && INTVAL (bl->biv->add_val) == -1)
5669 REG_NOTES (PREV_INSN (loop_end))
5670 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5671 REG_NOTES (PREV_INSN (loop_end)));
5678 else if (num_mem_sets <= 1)
5680 /* Try to change inc to dec, so can apply above optimization. */
5681 /* Can do this if:
5682 all registers modified are induction variables or invariant,
5683 all memory references have non-overlapping addresses
5684 (obviously true if only one write)
5685 allow 2 insns for the compare/jump at the end of the loop. */
5686 int num_nonfixed_reads = 0;
5687 /* 1 if the iteration var is used only to count iterations. */
5688 int no_use_except_counting = 0;
5689 /* 1 if the loop has no memory store, or it has a single memory store
5690 which is reversible. */
5691 int reversible_mem_store = 1;
5693 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
5694 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
5695 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
5697 if (bl->giv_count == 0
5698 && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
5700 rtx bivreg = regno_reg_rtx[bl->regno];
5702 /* If there are no givs for this biv, and the only exit is the
5703 fall through at the end of the loop, then
5704 see if perhaps there are no uses except to count. */
5705 no_use_except_counting = 1;
5706 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
5707 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
5709 rtx set = single_set (p);
5711 if (set && GET_CODE (SET_DEST (set)) == REG
5712 && REGNO (SET_DEST (set)) == bl->regno)
5713 /* An insn that sets the biv is okay. */
5715 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
5716 || p == prev_nonnote_insn (loop_end))
5717 /* Don't bother about the end test. */
5719 else if (reg_mentioned_p (bivreg, PATTERN (p)))
5720 /* Any other use of the biv is no good. */
5722 no_use_except_counting = 0;
5728 /* If the loop has a single store, and the destination address is
5729 invariant, then we can't reverse the loop, because this address
5730 might then have the wrong value at loop exit.
5731 This would work if the source were invariant as well; in that
5732 case, however, the insn should have been moved out of the loop. */
5734 if (num_mem_sets == 1)
5735 reversible_mem_store
5736 = (! unknown_address_altered
5737 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
5739 /* This code only acts for innermost loops. Also it simplifies
5740 the memory address check by only reversing loops with
5741 zero or one memory access.
5742 Two memory accesses could involve parts of the same array,
5743 and that can't be reversed. */
5745 if (num_nonfixed_reads <= 1
5746 && !loop_has_call
5747 && !loop_has_volatile
5748 && reversible_mem_store
5749 && (no_use_except_counting
5750 || (bl->giv_count + bl->biv_count + num_mem_sets
5751 + num_movables + 2 == insn_count)))
5753 rtx condition = get_condition_for_loop (PREV_INSN (loop_end));
5757 /* Loop can be reversed. */
5758 if (loop_dump_stream)
5759 fprintf (loop_dump_stream, "Can reverse loop\n");
5761 /* Now check other conditions:
5762 initial_value must be zero,
5763 final_value % add_val == 0, so that when reversed, the
5764 biv will be zero on the last iteration.
5766 This test can probably be improved since +/- 1 in the constant
5767 can be obtained by changing LT to LE and vice versa. */
5770 if (comparison && bl->initial_value == const0_rtx
5771 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
5772 /* LE gets turned into LT */
5773 && GET_CODE (comparison) == LT
5774 && (INTVAL (XEXP (comparison, 1))
5775 % INTVAL (bl->biv->add_val)) == 0)
5777 /* Register will always be nonnegative, with value
5778 0 on last iteration if loop reversed */
5780 /* Save some info needed to produce the new insns. */
5781 reg = bl->biv->dest_reg;
5782 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
5783 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
5785 final_value = XEXP (comparison, 1);
5786 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
5787 - INTVAL (bl->biv->add_val));
5789 /* Initialize biv to start_value before loop start.
5790 The old initializing insn will be deleted as a
5791 dead store by flow.c. */
5792 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
5794 /* Add insn to decrement register, and delete insn
5795 that incremented the register. */
5796 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
5798 delete_insn (bl->biv->insn);
5800 /* Update biv info to reflect its new status. */
5802 bl->initial_value = start_value;
5803 bl->biv->add_val = new_add_val;
5805 /* Inc LABEL_NUSES so that delete_insn will
5806 not delete the label. */
5807 LABEL_NUSES (XEXP (jump_label, 0)) ++;
5809 /* Emit an insn after the end of the loop to set the biv's
5810 proper exit value if it is used anywhere outside the loop. */
5811 if ((regno_last_uid[bl->regno]
5812 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
5814 || regno_first_uid[bl->regno] != INSN_UID (bl->init_insn))
5815 emit_insn_after (gen_move_insn (reg, final_value),
5818 /* Delete compare/branch at end of loop. */
5819 delete_insn (PREV_INSN (loop_end));
5820 delete_insn (PREV_INSN (loop_end));
5822 /* Add new compare/branch insn at end of loop. */
5824 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
5825 GET_MODE (reg), 0, 0);
5826 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
5827 tem = gen_sequence ();
5829 emit_jump_insn_before (tem, loop_end);
5831 for (tem = PREV_INSN (loop_end);
5832 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
5836 JUMP_LABEL (tem) = XEXP (jump_label, 0);
5838 /* Increment of LABEL_NUSES done above. */
5839 /* Register is now always nonnegative,
5840 so add REG_NONNEG note to the branch. */
5841 REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5847 /* Mark that this biv has been reversed. Each giv which depends
5848 on this biv, and which is also live past the end of the loop
5849 will have to be fixed up. */
5853 if (loop_dump_stream)
5854 fprintf (loop_dump_stream,
5855 "Reversed loop and added reg_nonneg\n");
5865 /* Verify whether the biv BL appears to be eliminable,
5866 based on the insns in the loop that refer to it.
5867 LOOP_START is the first insn of the loop, and END is the end insn.
5869 If ELIMINATE_P is non-zero, actually do the elimination.
5871 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
5872 determine whether invariant insns should be placed inside or at the
5873 start of the loop. */
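/* Hypothetical example: if the biv i occurs in the loop only in the exit
   test `i < 100', and a reduced giv r == 4*i exists, the test can be
   rewritten as `r < 400' (the constant being 100 * mult_val + add_val);
   every remaining use of i then disappears and the biv, along with its
   increment, can be eliminated.  */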
5876 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
5877 struct iv_class *bl;
5881 int threshold, insn_count;
5883 rtx reg = bl->biv->dest_reg;
5885 struct induction *v;
5887 /* Scan all insns in the loop, stopping if we find one that uses the
5888 biv in a way that we cannot eliminate. */
5890 for (p = loop_start; p != end; p = NEXT_INSN (p))
5892 enum rtx_code code = GET_CODE (p);
5893 rtx where = threshold >= insn_count ? loop_start : p;
5895 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
5896 && reg_mentioned_p (reg, PATTERN (p))
5897 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
5899 if (loop_dump_stream)
5900 fprintf (loop_dump_stream,
5901 "Cannot eliminate biv %d: biv used in insn %d.\n",
5902 bl->regno, INSN_UID (p));
5909 if (loop_dump_stream)
5910 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
5911 bl->regno, eliminate_p ? "was" : "can be");
5918 /* If BL appears in X (part of the pattern of INSN), see if we can
5919 eliminate its use. If so, return 1. If not, return 0.
5921 If BIV does not appear in X, return 1.
5923 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
5924 where extra insns should be added. Depending on how many items have been
5925 moved out of the loop, it will either be before INSN or at the start of
5926 the loop. */
5929 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
5931 struct iv_class *bl;
5935 enum rtx_code code = GET_CODE (x);
5936 rtx reg = bl->biv->dest_reg;
5937 enum machine_mode mode = GET_MODE (reg);
5938 struct induction *v;
5947 /* If we haven't already been able to do something with this BIV,
5948 we can't eliminate it. */
5954 /* If this sets the BIV, it is not a problem. */
5955 if (SET_DEST (x) == reg)
5958 /* If this is an insn that defines a giv, it is also ok because
5959 it will go away when the giv is reduced. */
5960 for (v = bl->giv; v; v = v->next_iv)
5961 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
5965 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
5967 /* Can replace with any giv that was reduced and
5968 that has (MULT_VAL != 0) and (ADD_VAL == 0).
5969 Require a constant for MULT_VAL, so we know it's nonzero. */
5971 for (v = bl->giv; v; v = v->next_iv)
5972 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
5973 && v->add_val == const0_rtx
5974 && ! v->ignore && ! v->maybe_dead
5980 /* If the giv has the opposite direction of change,
5981 then reverse the comparison. */
5982 if (INTVAL (v->mult_val) < 0)
5983 new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
5984 const0_rtx, v->new_reg);
5988 /* We can probably test that giv's reduced reg. */
5989 if (validate_change (insn, &SET_SRC (x), new, 0))
5993 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
5994 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
5995 Require a constant for MULT_VAL, so we know it's nonzero. */
5997 for (v = bl->giv; v; v = v->next_iv)
5998 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
5999 && ! v->ignore && ! v->maybe_dead
6005 /* If the giv has the opposite direction of change,
6006 then reverse the comparison. */
6007 if (INTVAL (v->mult_val) < 0)
6008 new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
6011 new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
6012 copy_rtx (v->add_val));
6014 /* Replace biv with the giv's reduced register. */
6015 update_reg_last_use (v->add_val, insn);
6016 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6019 /* Insn doesn't support that constant or invariant. Copy it
6020 into a register (it will be a loop invariant.) */
6021 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6023 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6026 if (validate_change (insn, &SET_SRC (PATTERN (insn)),
6027 gen_rtx (COMPARE, VOIDmode,
6028 v->new_reg, tem), 0))
6037 case GT: case GE: case GTU: case GEU:
6038 case LT: case LE: case LTU: case LEU:
6039 /* See if either argument is the biv. */
6040 if (XEXP (x, 0) == reg)
6041 arg = XEXP (x, 1), arg_operand = 1;
6042 else if (XEXP (x, 1) == reg)
6043 arg = XEXP (x, 0), arg_operand = 0;
6047 if (CONSTANT_P (arg))
6049 /* First try to replace with any giv that has constant positive
6050 mult_val and constant add_val. We might be able to support
6051 negative mult_val, but it seems complex to do it in general. */
6053 for (v = bl->giv; v; v = v->next_iv)
6054 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6055 && CONSTANT_P (v->add_val)
6056 && ! v->ignore && ! v->maybe_dead
6062 /* Replace biv with the giv's reduced reg. */
6063 XEXP (x, 1-arg_operand) = v->new_reg;
6065 /* If all constants are actually constant integers and
6066 the derived constant can be directly placed in the COMPARE,
6067 do so. */
6068 if (GET_CODE (arg) == CONST_INT
6069 && GET_CODE (v->mult_val) == CONST_INT
6070 && GET_CODE (v->add_val) == CONST_INT
6071 && validate_change (insn, &XEXP (x, arg_operand),
6072 GEN_INT (INTVAL (arg)
6073 * INTVAL (v->mult_val)
6074 + INTVAL (v->add_val)), 0))
6077 /* Otherwise, load it into a register. */
6078 tem = gen_reg_rtx (mode);
6079 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6080 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6083 /* If that failed, put back the change we made above. */
6084 XEXP (x, 1-arg_operand) = reg;
6087 /* Look for giv with positive constant mult_val and nonconst add_val.
6088 Insert insns to calculate new compare value. */
6090 for (v = bl->giv; v; v = v->next_iv)
6091 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6092 && ! v->ignore && ! v->maybe_dead
6100 tem = gen_reg_rtx (mode);
6102 /* Replace biv with giv's reduced register. */
6103 validate_change (insn, &XEXP (x, 1 - arg_operand),
6106 /* Compute value to compare against. */
6107 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6108 /* Use it in this insn. */
6109 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6110 if (apply_change_group ())
6114 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6116 if (invariant_p (arg) == 1)
6118 /* Look for giv with constant positive mult_val and nonconst
6119 add_val. Insert insns to compute new compare value. */
6121 for (v = bl->giv; v; v = v->next_iv)
6122 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6123 && ! v->ignore && ! v->maybe_dead
6131 tem = gen_reg_rtx (mode);
6133 /* Replace biv with giv's reduced register. */
6134 validate_change (insn, &XEXP (x, 1 - arg_operand),
6137 /* Compute value to compare against. */
6138 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6140 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6141 if (apply_change_group ())
6146 /* This code has problems. Basically, when deciding whether we will
6147 eliminate BL, there is no way to know whether a particular giv
6148 of ARG will be reduced. If it isn't going to be reduced,
6149 we can't eliminate BL. We can try forcing it to be reduced,
6150 but that can generate poor code.
6152 The problem is that the benefit of reducing TV, below should
6153 be increased if BL can actually be eliminated, but this means
6154 we might have to do a topological sort of the order in which
6155 we try to process biv. It doesn't seem worthwhile to do
6156 this sort of thing now. */
6159 /* Otherwise the reg compared with had better be a biv. */
6160 if (GET_CODE (arg) != REG
6161 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6164 /* Look for a pair of givs, one for each biv,
6165 with identical coefficients. */
6166 for (v = bl->giv; v; v = v->next_iv)
6168 struct induction *tv;
6170 if (v->ignore || v->maybe_dead || v->mode != mode)
6173 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6174 if (! tv->ignore && ! tv->maybe_dead
6175 && rtx_equal_p (tv->mult_val, v->mult_val)
6176 && rtx_equal_p (tv->add_val, v->add_val)
6177 && tv->mode == mode)
6182 /* Replace biv with its giv's reduced reg. */
6183 XEXP (x, 1-arg_operand) = v->new_reg;
6184 /* Replace other operand with the other giv's
6185 reduced reg. */
6186 XEXP (x, arg_operand) = tv->new_reg;
6193 /* If we get here, the biv can't be eliminated. */
6197 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6198 biv is used in it, since it will be replaced. */
6199 for (v = bl->giv; v; v = v->next_iv)
6200 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6205 /* See if any subexpression fails elimination. */
6206 fmt = GET_RTX_FORMAT (code);
6207 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6212 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6213 eliminate_p, where))
6218 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6219 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6220 eliminate_p, where))
6229 /* Return nonzero if the last use of REG
6230 is in an insn following INSN in the same basic block. */
6233 last_use_this_basic_block (reg, insn)
6239 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6242 if (regno_last_uid[REGNO (reg)] == INSN_UID (n))
6248 /* Called via `note_stores' to record the initial value of a biv. Here we
6249 just record the location of the set and process it later. */
6252 record_initial (dest, set)
6256 struct iv_class *bl;
6258 if (GET_CODE (dest) != REG
6259 || REGNO (dest) >= max_reg_before_loop
6260 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6263 bl = reg_biv_class[REGNO (dest)];
6265 /* If this is the first set found, record it. */
6266 if (bl->init_insn == 0)
6268 bl->init_insn = note_insn;
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[regno_last_uid[REGNO (x)]] < uid_luid[INSN_UID (insn)])
    regno_last_uid[REGNO (x)] = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));

      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    update_reg_last_use (XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      update_reg_last_use (XVECEXP (x, i, j), insn);
	}
    }
}
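
/* A self-contained sketch (illustration only, not compiler code) of the
   format-driven traversal used above and in maybe_eliminate_biv_1: each
   node kind carries a format string in which 'e' marks a single
   subexpression and 'E' marks a vector of them, and a recursive walk
   visits every operand.  The node type below is a hypothetical stand-in
   for rtx; a real format string lines positions up with operand slots,
   which this two-slot struct only approximates.  */

#include <string.h>

struct node_sketch
{
  const char *format;		/* e.g. "eE" */
  struct node_sketch *op;	/* operand for an 'e' slot, if any */
  struct node_sketch **vec;	/* operands for an 'E' slot, if any */
  int veclen;
  int visited;
};

static void
walk_node_sketch (x)
     struct node_sketch *x;
{
  int i, j;

  x->visited = 1;
  for (i = strlen (x->format) - 1; i >= 0; i--)
    {
      if (x->format[i] == 'e')
	walk_node_sketch (x->op);
      else if (x->format[i] == 'E')
	for (j = x->veclen - 1; j >= 0; j--)
	  walk_node_sketch (x->vec[j]);
    }
}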
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU (a standalone sketch of this rewrite follows
       the function).  */
rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == const0_rtx)
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || GET_CODE (prev) != INSN
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (GET_CODE (op0) != REG)
	break;
      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
	  || GET_CODE (prev) != INSN
	  || FIND_REG_INC_NOTE (prev, 0)
	  || (set = single_set (prev)) == 0)
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (SET_DEST (set) == op0)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
			 && FLOAT_STORE_FLAG_VALUE < 0)
#endif
		     ))
		   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
	    {
	      /* We might have reversed a LT to get a GE here.  But this
		 wasn't actually the comparison of data, so we don't flag
		 that we have had to reverse the condition.  */
	      did_reverse_condition ^= 1;
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x != 0)
	{
	  if (GET_RTX_CLASS (GET_CODE (x)) == '<')
	    code = GET_CODE (x);

	  if (reverse_code)
	    {
	      code = reverse_condition (code);
	      did_reverse_condition ^= 1;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if (const_val != max_val >> 1)
	    code = LT, op1 = GEN_INT (const_val + 1);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT, so mask it first.  */
	case GE:
	  if ((const_val & max_val)
	      != (((HOST_WIDE_INT) 1
		   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
	    code = GT, op1 = GEN_INT (const_val - 1);
	  break;

	case LEU:
	  if (uconst_val != max_val)
	    code = LTU, op1 = GEN_INT (uconst_val + 1);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = GEN_INT (uconst_val - 1);
	  break;
	}
    }
  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx (code, VOIDmode, op0, op1);
}
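
/* A self-contained sketch (illustration only, not compiler code) of
   canonicalization rule (4) above, as implemented by the switch over
   `code': `x <= C' may be rewritten as the strict `x < C+1' only when
   C+1 cannot wrap around, which is what the max_val tests guard.  The
   function below is hypothetical and uses plain signed int.  */

#include <limits.h>

static int
canonicalize_le_sketch (x, c)
     int x, c;
{
  /* Mirrors the LE case: for signed int, INT_MAX plays the role of
     max_val >> 1, the one constant for which c + 1 would overflow.  */
  if (c != INT_MAX)
    return x < c + 1;		/* canonical strict form of x <= c */
  return 1;			/* x <= INT_MAX is always true */
}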
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
		  XEXP (comparison, 1), XEXP (comparison, 0));
}
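
/* A self-contained sketch (illustration only, not compiler code) of the
   operand swap done by get_condition_for_loop: exchanging the operands
   of a comparison while replacing its code by swap_condition's result
   preserves the test; e.g. `a < b' and `b > a' always agree.  The
   function below is hypothetical.  */

static int
swap_condition_sketch (a, b)
     int a, b;
{
  /* Always returns 1: LT on (a, b) is GT on (b, a).  */
  return (a < b) == (b > a);
}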