/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
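/* As a source-level illustration (not from any particular test case),
   a loop such as

       for (i = 0; i < n; i++)
         a[i] = x * y;

   is transformed, in effect, into

       t = x * y;
       for (p = a; p < a + n; p++)
         *p = t;

   where the invariant product x * y has been hoisted before the loop
   and the induction variable i has been strength-reduced into the
   pointer p that is bumped each iteration.  */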
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-flags.h"
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif
/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100

/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6

/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
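/* For instance, with the default PREFETCH_BLOCK of 32 bytes, a loop known
   to sweep a 128-byte array touches 4 blocks; since 4 lies between
   PREFETCH_BLOCKS_BEFORE_LOOP_MIN and PREFETCH_BLOCKS_BEFORE_LOOP_MAX,
   issuing those prefetches ahead of the loop would be considered
   profitable.  (Illustrative arithmetic only.)  */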
/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum fraction of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
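/* E.g., the default of 220 corresponds to a density of roughly 220/256,
   i.e. about 86% of the candidate memory locations must actually be
   referenced before prefetching is attempted.  (Illustrative
   arithmetic.)  */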
/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
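/* E.g., after earlier passes have shuffled insns, uids 40, 12 and 57 may
   appear in that order in the insn stream; compute_luids then assigns
   them luids 1, 2 and 3, so comparing luids compares positions in the
   code.  (Illustrative uid values.)  */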
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   loops array.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */
  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				   first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static void scan_loop (struct loop*, int);
static void replace_call_address (rtx, rtx, rtx);
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);
typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))
/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */

static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;
  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  end_alias_analysis ();
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int in_libcall;
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
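  /* For instance, a loop compiled from "for (i = 0; i < n; i++) body;"
     is typically laid out as

	 jump to the exit test
       top:
	 body
       test:
	 conditional jump back to top

     so the first real insn inside the loop notes is an unconditional
     jump down to the test.  (Illustrative layout.)  */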
  for (p = NEXT_INSN (loop_start);
       p != NULL_RTX
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
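  /* As a worked example: on a target with 31 non-fixed registers, a loop
     that makes no calls gets threshold 2 * (1 + 31) = 64, while a loop
     containing calls gets only 32, since lengthening register lifetimes
     is less affordable there.  (Illustrative register count.)  */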
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
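  /* For example, an insn that divides by a value that might be zero can
     trap; it would be wrong to execute it once in front of a loop whose
     body might run zero times, since the original program would never
     have reached it.  */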
  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (GET_CODE (p) == INSN)
	{
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || GET_CODE (SET_SRC (set)) == REG
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two
		     insns can be combined as long as they are both in the
		     loop, but we move one of them outside the loop.  For
		     large loops, this can lose.  The most common case of
		     this is the address of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
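		  /* For instance, if the only use of a pseudo loaded by
		     (set (reg) (symbol_ref f)) is a call through that reg,
		     substituting the symbol_ref into the call lets us delete
		     the set entirely instead of hoisting it and tying up a
		     register for the whole loop.  (Illustrative pattern.)  */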
		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (GET_CODE (SET_SRC (set)) == REG
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = (struct movable *) xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
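	      /* In RTL terms the pattern being looked for is, roughly:
		   (set (reg:SI r) (const_int 0))
		   (set (strict_low_part (subreg:HI (reg:SI r) 0)) ...)
		 so only the zeroing of the full register needs to move out
		 of the loop.  (Illustrative modes and operands.)  */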
	      else if (SET_SRC (set) == const0_rtx
		       && GET_CODE (NEXT_INSN (p)) == INSN
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = (struct movable *) xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t)
			     {
			       if (foo ()) x = *s;
			       use (x);
			     }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg it loads dies, at its last
     use, in another insn that is itself conditionally movable.  If so,
     record that the second insn "forces" the first one, since the
     second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);
  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */

  if (!optimize_size
      || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
    move_movables (loop, movables, threshold, insn_count);

  /* Recalculate regs->array if move_movables has created new
     registers.  */
  if (max_reg_num () > regs->num)
    {
      loop_regs_scan (loop, 0);
      for (update_start = loop_start;
	   PREV_INSN (update_start)
	     && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
	   update_start = PREV_INSN (update_start))
	;
      update_end = NEXT_INSN (loop_end);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not
     actually invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  code = GET_CODE (in_this);

  if (code == REG)
    {
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (in_this, i); j++)
	  record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
      else if (fmt[i] == 'e')
	record_excess_regs (XEXP (in_this, i), not_in_this, output);
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (rtx insn, rtx equiv)
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (rtx insn, rtx reg)
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;
	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;
	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;
	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (rtx last)
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (rtx insn, int count)
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg it loads dies, at its last use,
   in another insn that is itself conditionally movable.  If so, record
   that the second insn "forces" the first one, since the second can be
   moved only if the first is.  */

static void
force_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m->forces = m1;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0 && m->set_src == m1->set_dest)
	  {
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (struct loop_movables *movables, struct loop_regs *regs)
{
  struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Only pseudo registers are allowed to match or be matched,
     since move_movables does not validate the change.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
	&& m->regno >= FIRST_PSEUDO_REGISTER
	&& !m->insert_temp
	&& !m->partial)
      {
	struct movable *m1;
	int regno = m->regno;

	memset (matched_regs, 0, regs->num);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && !m1->insert_temp
	      && regs->array[m1->regno].n_times_set == 1
	      && m1->regno >= FIRST_PSEUDO_REGISTER
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  || (
		      /* Can combine regs with different modes loaded from the
			 same constant only if the modes are the same or
			 if both are integer modes with M wider or the same
			 width as M1.  The check for integer is redundant, but
			 safe, since the only case of differing destination
			 modes with equal sources is when both sources are
			 VOIDmode, i.e., CONST_INT.  */
		      (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		       || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			   && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			   && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			       >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		      /* See if the source of M1 says it matches M.  */
		      && ((GET_CODE (m1->set_src) == REG
			   && matched_regs[REGNO (m1->set_src)])
			  || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						   movables, regs))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    struct movable *m1;

	    int first = REGNO_FIRST_LUID (m->regno);
	    int last = REGNO_LAST_LUID (m->regno);

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
	      }
	    else
	      {
		/* Make sure they extend to the same mode.
		   (Almost always true.)  */
		if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
		  continue;

		/* We already have one: check for overlap with those
		   already combined together.  */
		for (m1 = movables->head; m1 != m; m1 = m1->next)
		  if (m1 == m0 || (m1->partial && m1->match == m0))
		    if (! (REGNO_FIRST_LUID (m1->regno) > last
			   || REGNO_LAST_LUID (m1->regno) < first))
		      goto overlap;

		/* No overlap: we can combine this with the others.  */
		m0->lifetime += m->lifetime;
		m0->savings += m->savings;
		m->done = 1;
		m->match = m0;

	      overlap:
		;
	      }
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}
/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (const struct loop *loop)
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (rtx x, rtx y, struct loop_movables *movables)
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */
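/* For instance, if a movable insn loads pseudo 90 from (const_int 5) and
   will be re-emitted with emit_move_insn, then (reg:SI 90) in one
   expression and (const_int 5) in the other compare equal here.
   (Illustrative register number.)  */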
static int
rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
		      struct loop_regs *regs)
{
  int i;
  int j;
  struct movable *m;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
      && CONSTANT_P (y))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
	   && CONSTANT_P (x))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
				      movables, regs) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
	      == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  LABEL_NUSES for CODE_LABEL
   references is incremented once for each added note.  */

static void
add_label_notes (rtx x, rtx insns)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (reg_mentioned_p (XEXP (x, 0), insn))
	  {
	    REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
						  REG_NOTES (insn));
	    if (LABEL_P (XEXP (x, 0)))
	      LABEL_NUSES (XEXP (x, 0))++;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (struct loop *loop, struct loop_movables *movables,
	       int threshold, int insn_count)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  int nregs = regs->num;
  rtx new_start = 0;
  struct movable *m;
  rtx p;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
  char *already_moved = (char *) xcalloc (nregs, sizeof (char));
  for (m = movables->head; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}
      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == loop_invariant_p (loop, m->set_src)
		  && (m->dependencies == 0
		      || 1 == loop_invariant_p (loop, m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (loop, m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  int regno;
	  int savings = m->savings;

	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (regs->array[regno].moved_once && loop_dump_stream)
	    fprintf (loop_dump_stream, "halved since already moved ");

	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || flag_move_all_movables
	      || (threshold * savings * m->lifetime) >=
		 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
	      || (m->forces && m->forces->done
		  && regs->array[m->forces->regno].n_times_set == 1))
1861 rtx first = NULL_RTX;
1862 rtx newreg = NULL_RTX;
1865 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1867 /* Now move the insns that set the reg. */
1869 if (m->partial && m->match)
1873 /* Find the end of this chain of matching regs.
1874 Thus, we load each reg in the chain from that one reg.
1875 And that reg is loaded with 0 directly,
1876 since it has ->match == 0. */
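/* E.g., with a chain A->match == B, B->match == C and C->match == 0,
   C is the register loaded with 0 before the loop, and A and B are
   simply copied from C.  */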
1877 for (m1 = m; m1->match; m1 = m1->match);
1878 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1879 SET_DEST (PATTERN (m1->insn)));
1880 i1 = loop_insn_hoist (loop, newpat);
1882 /* Mark the moved, invariant reg as being allowed to
1883 share a hard reg with the other matching invariant. */
1884 REG_NOTES (i1) = REG_NOTES (m->insn);
1885 r1 = SET_DEST (PATTERN (m->insn));
1886 r2 = SET_DEST (PATTERN (m1->insn));
1888 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1889 gen_rtx_EXPR_LIST (VOIDmode, r2,
1891 delete_insn (m->insn);
1896 if (loop_dump_stream)
1897 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1899 /* If we are to re-generate the item being moved with a
1900 new move insn, first delete what we have and then emit
1901 the move insn before the loop. */
1902 else if (m->move_insn)
1906 for (count = m->consec; count >= 0; count--)
1908 /* If this is the first insn of a library call sequence,
1909 something is very wrong. */
1910 if (GET_CODE (p) != NOTE
1911 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1914 /* If this is the last insn of a libcall sequence, then
1915 delete every insn in the sequence except the last.
1916 The last insn is handled in the normal manner. */
1917 if (GET_CODE (p) != NOTE
1918 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1920 temp = XEXP (temp, 0);
1922 temp = delete_insn (temp);
1926 p = delete_insn (p);
1928 /* simplify_giv_expr expects that it can walk the insns
1929 at m->insn forwards and see this old sequence we are
1930 tossing here. delete_insn does preserve the next
1931 pointers, but when we skip over a NOTE we must fix
1932 it up. Otherwise that code walks into the non-deleted
1934 while (p && GET_CODE (p) == NOTE)
1935 p = NEXT_INSN (temp) = NEXT_INSN (p);
1939 /* Replace the original insn with a move from
1940 our newly created temp. */
1942 emit_move_insn (m->set_dest, newreg);
1945 emit_insn_before (seq, p);
1950 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
1955 add_label_notes (m->set_src, seq);
1957 i1 = loop_insn_hoist (loop, seq);
1958 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1959 set_unique_reg_note (i1,
1960 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1963 if (loop_dump_stream)
1964 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1966 /* The more regs we move, the less we like moving them. */
1971 for (count = m->consec; count >= 0; count--)
1975 /* If first insn of libcall sequence, skip to end. */
1976 /* Do this at start of loop, since p is guaranteed to
1978 if (GET_CODE (p) != NOTE
1979 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1982 /* If last insn of libcall sequence, move all
1983 insns except the last before the loop. The last
1984 insn is handled in the normal manner. */
1985 if (GET_CODE (p) != NOTE
1986 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1990 rtx fn_address_insn = 0;
1993 for (temp = XEXP (temp, 0); temp != p;
1994 temp = NEXT_INSN (temp))
2000 if (GET_CODE (temp) == NOTE)
2003 body = PATTERN (temp);
2005 /* Find the next insn after TEMP,
2006 not counting USE or NOTE insns. */
2007 for (next = NEXT_INSN (temp); next != p;
2008 next = NEXT_INSN (next))
2009 if (! (GET_CODE (next) == INSN
2010 && GET_CODE (PATTERN (next)) == USE)
2011 && GET_CODE (next) != NOTE)
2014 /* If that is the call, this may be the insn
2015 that loads the function address.
2017 Extract the function address from the insn
2018 that loads it into a register.
2019 If this insn was cse'd, we get incorrect code.
2021 So emit a new move insn that copies the
2022 function address into the register that the
2023 call insn will use. flow.c will delete any
2024 redundant stores that we have created. */
2025 if (GET_CODE (next) == CALL_INSN
2026 && GET_CODE (body) == SET
2027 && GET_CODE (SET_DEST (body)) == REG
2028 && (n = find_reg_note (temp, REG_EQUAL,
2031 fn_reg = SET_SRC (body);
2032 if (GET_CODE (fn_reg) != REG)
2033 fn_reg = SET_DEST (body);
2034 fn_address = XEXP (n, 0);
2035 fn_address_insn = temp;
2037 /* We have the call insn.
2038 If it uses the register we suspect it might,
2039 load it with the correct address directly. */
2040 if (GET_CODE (temp) == CALL_INSN
2042 && reg_referenced_p (fn_reg, body))
2043 loop_insn_emit_after (loop, 0, fn_address_insn,
2045 (fn_reg, fn_address));
2047 if (GET_CODE (temp) == CALL_INSN)
2049 i1 = loop_call_insn_hoist (loop, body);
2050 /* Because the USAGE information potentially
2051 contains objects other than hard registers
2052 we need to copy it. */
2053 if (CALL_INSN_FUNCTION_USAGE (temp))
2054 CALL_INSN_FUNCTION_USAGE (i1)
2055 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2058 i1 = loop_insn_hoist (loop, body);
2061 if (temp == fn_address_insn)
2062 fn_address_insn = i1;
2063 REG_NOTES (i1) = REG_NOTES (temp);
2064 REG_NOTES (temp) = NULL;
2070 if (m->savemode != VOIDmode)
2072 /* P sets REG to zero; but we should clear only
2073 the bits that are not covered by the mode
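(For example, with a QImode SAVEMODE the AND computed below masks REG
with 0xff, clearing only the bits above the low byte.)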
2075 rtx reg = m->set_dest;
2080 tem = expand_simple_binop
2081 (GET_MODE (reg), AND, reg,
2082 GEN_INT ((((HOST_WIDE_INT) 1
2083 << GET_MODE_BITSIZE (m->savemode)))
2085 reg, 1, OPTAB_LIB_WIDEN);
2089 emit_move_insn (reg, tem);
2090 sequence = get_insns ();
2092 i1 = loop_insn_hoist (loop, sequence);
2094 else if (GET_CODE (p) == CALL_INSN)
2096 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2097 /* Because the USAGE information potentially
2098 contains objects other than hard registers
2099 we need to copy it. */
2100 if (CALL_INSN_FUNCTION_USAGE (p))
2101 CALL_INSN_FUNCTION_USAGE (i1)
2102 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2104 else if (count == m->consec && m->move_insn_first)
2107 /* The SET_SRC might not be invariant, so we must
2108 use the REG_EQUAL note. */
2110 emit_move_insn (m->set_dest, m->set_src);
2114 add_label_notes (m->set_src, seq);
2116 i1 = loop_insn_hoist (loop, seq);
2117 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2118 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2119 : REG_EQUAL, m->set_src);
2121 else if (m->insert_temp)
2123 rtx *reg_map2 = (rtx *) xcalloc (REGNO (newreg),
2125 reg_map2 [m->regno] = newreg;
2127 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2128 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2132 i1 = loop_insn_hoist (loop, PATTERN (p));
2134 if (REG_NOTES (i1) == 0)
2136 REG_NOTES (i1) = REG_NOTES (p);
2137 REG_NOTES (p) = NULL;
2139 /* If there is a REG_EQUAL note present whose value
2140 is not loop invariant, then delete it, since it
2141 may cause problems with later optimization passes.
2142 It is possible for cse to create notes
2143 like this as a result of record_jump_cond. */
2145 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2146 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2147 remove_note (i1, temp);
2153 if (loop_dump_stream)
2154 fprintf (loop_dump_stream, " moved to %d",
2157 /* If library call, now fix the REG_NOTES that contain
2158 insn pointers, namely REG_LIBCALL on FIRST
2159 and REG_RETVAL on I1. */
2160 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2162 XEXP (temp, 0) = first;
2163 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2164 XEXP (temp, 0) = i1;
2171 /* simplify_giv_expr expects that it can walk the insns
2172 at m->insn forwards and see this old sequence we are
2173 tossing here. delete_insn does preserve the next
2174 pointers, but when we skip over a NOTE we must fix
2175 it up. Otherwise that code walks into the non-deleted
2177 while (p && GET_CODE (p) == NOTE)
2178 p = NEXT_INSN (temp) = NEXT_INSN (p);
2183 /* Replace the original insn with a move from
2184 our newly created temp. */
2186 emit_move_insn (m->set_dest, newreg);
2189 emit_insn_before (seq, p);
2193 /* The more regs we move, the less we like moving them. */
2199 if (!m->insert_temp)
2201 /* Any other movable that loads the same register
2203 already_moved[regno] = 1;
2205 /* This reg has been moved out of one loop. */
2206 regs->array[regno].moved_once = 1;
2208 /* The reg set here is now invariant. */
2212 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2213 regs->array[regno+i].set_in_loop = 0;
2216 /* Change the length-of-life info for the register
2217 to say it lives at least the full length of this loop.
2218 This will help guide optimizations in outer loops. */
2220 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2221 /* This is the old insn before all the moved insns.
2222 We can't use the moved insn because it is out of range
2223 in uid_luid. Only the old insns have luids. */
2224 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2225 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2226 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2229 /* Combine with this moved insn any other matching movables. */
2232 for (m1 = movables->head; m1; m1 = m1->next)
2237 /* Schedule the reg loaded by M1
2238 for replacement so that it shares the reg of M.
2239 If the modes differ (only possible in restricted
2240 circumstances), make a SUBREG.
2242 Note this assumes that the target dependent files
2243 treat REG and SUBREG equally, including within
2244 GO_IF_LEGITIMATE_ADDRESS and in all the
2245 predicates since we never verify that replacing the
2246 original register with a SUBREG results in a
2247 recognizable insn. */
2248 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2249 reg_map[m1->regno] = m->set_dest;
2252 = gen_lowpart_common (GET_MODE (m1->set_dest),
2255 /* Get rid of the matching insn
2256 and prevent further processing of it. */
2259 /* If library call, delete all insns. */
2260 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2262 delete_insn_chain (XEXP (temp, 0), m1->insn);
2264 delete_insn (m1->insn);
2266 /* Any other movable that loads the same register
2268 already_moved[m1->regno] = 1;
2270 /* The reg merged here is now invariant,
2271 if the reg it matches is invariant. */
2276 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2278 regs->array[m1->regno+i].set_in_loop = 0;
2282 else if (loop_dump_stream)
2283 fprintf (loop_dump_stream, "not desirable");
2285 else if (loop_dump_stream && !m->match)
2286 fprintf (loop_dump_stream, "not safe");
2288 if (loop_dump_stream)
2289 fprintf (loop_dump_stream, "\n");
2293 new_start = loop_start;
2295 /* Go through all the instructions in the loop, making
2296 all the register substitutions scheduled in REG_MAP. */
2297 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2298 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2299 || GET_CODE (p) == CALL_INSN)
2301 replace_regs (PATTERN (p), reg_map, nregs, 0);
2302 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2308 free (already_moved);
2313 loop_movables_add (struct loop_movables *movables, struct movable *m)
2315 if (movables->head == 0)
2318 movables->last->next = m;
2324 loop_movables_free (struct loop_movables *movables)
2327 struct movable *m_next;
2329 for (m = movables->head; m; m = m_next)
2337 /* Scan X and replace the address of any MEM in it with ADDR.
2338 REG is the address that MEM should have before the replacement. */
2341 replace_call_address (rtx x, rtx reg, rtx addr)
2349 code = GET_CODE (x);
2363 /* Shortcut for a very common case. */
2364 replace_call_address (XEXP (x, 1), reg, addr);
2369 /* Shortcut for a very common case. */
2369 replace_call_address (XEXP (x, 0), reg, addr);
2373 /* If this MEM uses a reg other than the one we expected,
2374 something is wrong. */
2375 if (XEXP (x, 0) != reg)
2384 fmt = GET_RTX_FORMAT (code);
2385 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2388 replace_call_address (XEXP (x, i), reg, addr);
2389 else if (fmt[i] == 'E')
2392 for (j = 0; j < XVECLEN (x, i); j++)
2393 replace_call_address (XVECEXP (x, i, j), reg, addr);
2399 /* Return the number of memory refs to addresses that vary
2403 count_nonfixed_reads (const struct loop *loop, rtx x)
2413 code = GET_CODE (x);
2427 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2428 + count_nonfixed_reads (loop, XEXP (x, 0)));
2435 fmt = GET_RTX_FORMAT (code);
2436 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2439 value += count_nonfixed_reads (loop, XEXP (x, i));
2443 for (j = 0; j < XVECLEN (x, i); j++)
2444 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2450 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2451 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2452 `unknown_address_altered', `unknown_constant_address_altered', and
2453 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2454 list `store_mems' in LOOP. */
2457 prescan_loop (struct loop *loop)
2461 struct loop_info *loop_info = LOOP_INFO (loop);
2462 rtx start = loop->start;
2463 rtx end = loop->end;
2464 /* The label after END. Jumping here is just like falling off the
2465 end of the loop. We use next_nonnote_insn instead of next_label
2466 as a hedge against the (pathological) case where some actual insn
2467 might end up between the two. */
2468 rtx exit_target = next_nonnote_insn (end);
2470 loop_info->has_indirect_jump = indirect_jump_in_function;
2471 loop_info->pre_header_has_call = 0;
2472 loop_info->has_call = 0;
2473 loop_info->has_nonconst_call = 0;
2474 loop_info->has_prefetch = 0;
2475 loop_info->has_volatile = 0;
2476 loop_info->has_tablejump = 0;
2477 loop_info->has_multiple_exit_targets = 0;
2480 loop_info->unknown_address_altered = 0;
2481 loop_info->unknown_constant_address_altered = 0;
2482 loop_info->store_mems = NULL_RTX;
2483 loop_info->first_loop_store_insn = NULL_RTX;
2484 loop_info->mems_idx = 0;
2485 loop_info->num_mem_sets = 0;
2486 /* If loop opts run twice, this was set on 1st pass for 2nd. */
2487 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2489 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2490 insn = PREV_INSN (insn))
2492 if (GET_CODE (insn) == CALL_INSN)
2494 loop_info->pre_header_has_call = 1;
2499 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2500 insn = NEXT_INSN (insn))
2502 switch (GET_CODE (insn))
2505 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2508 /* Count number of loops contained in this one. */
2511 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2516 if (! CONST_OR_PURE_CALL_P (insn))
2518 loop_info->unknown_address_altered = 1;
2519 loop_info->has_nonconst_call = 1;
2521 else if (pure_call_p (insn))
2522 loop_info->has_nonconst_call = 1;
2523 loop_info->has_call = 1;
2524 if (can_throw_internal (insn))
2525 loop_info->has_multiple_exit_targets = 1;
2527 /* Calls initializing constant objects have CLOBBER of MEM /u in the
2528 attached FUNCTION_USAGE expression list, not accounted for by the
2529 code above. We should note these to avoid missing dependencies in
2530 later references. */
2534 for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn);
2535 fusage_entry; fusage_entry = XEXP (fusage_entry, 1))
2537 rtx fusage = XEXP (fusage_entry, 0);
2539 if (GET_CODE (fusage) == CLOBBER
2540 && GET_CODE (XEXP (fusage, 0)) == MEM
2541 && RTX_UNCHANGING_P (XEXP (fusage, 0)))
2543 note_stores (fusage, note_addr_stored, loop_info);
2544 if (! loop_info->first_loop_store_insn
2545 && loop_info->store_mems)
2546 loop_info->first_loop_store_insn = insn;
2553 if (! loop_info->has_multiple_exit_targets)
2555 rtx set = pc_set (insn);
2559 rtx src = SET_SRC (set);
2562 if (GET_CODE (src) == IF_THEN_ELSE)
2564 label1 = XEXP (src, 1);
2565 label2 = XEXP (src, 2);
2575 if (label1 && label1 != pc_rtx)
2577 if (GET_CODE (label1) != LABEL_REF)
2579 /* Something tricky. */
2580 loop_info->has_multiple_exit_targets = 1;
2583 else if (XEXP (label1, 0) != exit_target
2584 && LABEL_OUTSIDE_LOOP_P (label1))
2586 /* A jump outside the current loop. */
2587 loop_info->has_multiple_exit_targets = 1;
2599 /* A return, or something tricky. */
2600 loop_info->has_multiple_exit_targets = 1;
2606 if (volatile_refs_p (PATTERN (insn)))
2607 loop_info->has_volatile = 1;
2609 if (GET_CODE (insn) == JUMP_INSN
2610 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2611 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2612 loop_info->has_tablejump = 1;
2614 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2615 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2616 loop_info->first_loop_store_insn = insn;
2618 if (flag_non_call_exceptions && can_throw_internal (insn))
2619 loop_info->has_multiple_exit_targets = 1;
2627 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2628 if (/* An exception thrown by a called function might land us
2630 ! loop_info->has_nonconst_call
2631 /* We don't want loads for MEMs moved to a location before the
2632 one at which their stack memory becomes allocated. (Note
2633 that this is not a problem for malloc, etc., since those
2634 require actual function calls.) */
2635 && ! current_function_calls_alloca
2636 /* There are ways to leave the loop other than falling off the
2638 && ! loop_info->has_multiple_exit_targets)
2639 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2640 insn = NEXT_INSN (insn))
2641 for_each_rtx (&insn, insert_loop_mem, loop_info);
2643 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2644 that loop_invariant_p and load_mems can use true_dependence
2645 to determine what is really clobbered. */
2646 if (loop_info->unknown_address_altered)
2648 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2650 loop_info->store_mems
2651 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2653 if (loop_info->unknown_constant_address_altered)
2655 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2657 RTX_UNCHANGING_P (mem) = 1;
2658 loop_info->store_mems
2659 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2663 /* Invalidate all loops containing LABEL. */
2666 invalidate_loops_containing_label (rtx label)
2669 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2673 /* Scan the function looking for loops. Record the start and end of each loop.
2674 Also mark as invalid any loops that contain a setjmp or are branched
2675 to from outside the loop. */
2678 find_and_verify_loops (rtx f, struct loops *loops)
2683 struct loop *current_loop;
2684 struct loop *next_loop;
2687 num_loops = loops->num;
2689 compute_luids (f, NULL_RTX, 0);
2691 /* If there are jumps to undefined labels,
2692 treat them as jumps out of any/all loops.
2693 This also avoids writing past end of tables when there are no loops. */
2696 /* Find boundaries of loops, mark which loops are contained within
2697 loops, and invalidate loops that have setjmp. */
2700 current_loop = NULL;
2701 for (insn = f; insn; insn = NEXT_INSN (insn))
2703 if (GET_CODE (insn) == NOTE)
2704 switch (NOTE_LINE_NUMBER (insn))
2706 case NOTE_INSN_LOOP_BEG:
2707 next_loop = loops->array + num_loops;
2708 next_loop->num = num_loops;
2710 next_loop->start = insn;
2711 next_loop->outer = current_loop;
2712 current_loop = next_loop;
2715 case NOTE_INSN_LOOP_CONT:
2716 current_loop->cont = insn;
2719 case NOTE_INSN_LOOP_VTOP:
2720 current_loop->vtop = insn;
2723 case NOTE_INSN_LOOP_END:
2727 current_loop->end = insn;
2728 current_loop = current_loop->outer;
2735 if (GET_CODE (insn) == CALL_INSN
2736 && find_reg_note (insn, REG_SETJMP, NULL))
2738 /* In this case, we must invalidate our current loop and any
2740 for (loop = current_loop; loop; loop = loop->outer)
2743 if (loop_dump_stream)
2744 fprintf (loop_dump_stream,
2745 "\nLoop at %d ignored due to setjmp.\n",
2746 INSN_UID (loop->start));
2750 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2751 enclosing loop, but this doesn't matter. */
2752 uid_loop[INSN_UID (insn)] = current_loop;
2755 /* Any loop containing a label used in an initializer must be invalidated,
2756 because it can be jumped into from anywhere. */
2757 for (label = forced_labels; label; label = XEXP (label, 1))
2758 invalidate_loops_containing_label (XEXP (label, 0));
2760 /* Any loop containing a label used for an exception handler must be
2761 invalidated, because it can be jumped into from anywhere. */
2762 for_each_eh_label (invalidate_loops_containing_label);
2764 /* Now scan all insns in the function. If any JUMP_INSN branches into a
2765 loop that it is not contained within, that loop is marked invalid.
2766 If any INSN or CALL_INSN uses a label's address, then the loop containing
2767 that label is marked invalid, because it could be jumped into from
2770 Also look for blocks of code ending in an unconditional branch that
2771 exits the loop. If such a block is surrounded by a conditional
2772 branch around the block, move the block elsewhere (see below) and
2773 invert the jump to point to the code block. This may eliminate a
2774 label in our loop and will simplify processing by both us and a
2775 possible second cse pass. */
2777 for (insn = f; insn; insn = NEXT_INSN (insn))
2780 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2782 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2784 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2786 invalidate_loops_containing_label (XEXP (note, 0));
2789 if (GET_CODE (insn) != JUMP_INSN)
2792 mark_loop_jump (PATTERN (insn), this_loop);
2794 /* See if this is an unconditional branch outside the loop. */
2796 && (GET_CODE (PATTERN (insn)) == RETURN
2797 || (any_uncondjump_p (insn)
2798 && onlyjump_p (insn)
2799 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2801 && get_max_uid () < max_uid_for_loop)
2804 rtx our_next = next_real_insn (insn);
2805 rtx last_insn_to_move = NEXT_INSN (insn);
2806 struct loop *dest_loop;
2807 struct loop *outer_loop = NULL;
2809 /* Go backwards until we reach the start of the loop, a label,
2811 for (p = PREV_INSN (insn);
2812 GET_CODE (p) != CODE_LABEL
2813 && ! (GET_CODE (p) == NOTE
2814 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2815 && GET_CODE (p) != JUMP_INSN;
2819 /* Check for the case where we have a jump to an inner nested
2820 loop, and do not perform the optimization in that case. */
2822 if (JUMP_LABEL (insn))
2824 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2827 for (outer_loop = dest_loop; outer_loop;
2828 outer_loop = outer_loop->outer)
2829 if (outer_loop == this_loop)
2834 /* Make sure that the target of P is within the current loop. */
2836 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2837 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2838 outer_loop = this_loop;
2840 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2841 we have a block of code to try to move.
2843 We look backward and then forward from the target of INSN
2844 to find a BARRIER at the same loop depth as the target.
2845 If we find such a BARRIER, we make a new label for the start
2846 of the block, invert the jump in P and point it to that label,
2847 and move the block of code to the spot we found. */
2850 && GET_CODE (p) == JUMP_INSN
2851 && JUMP_LABEL (p) != 0
2852 /* Just ignore jumps to labels that were never emitted.
2853 These always indicate compilation errors. */
2854 && INSN_UID (JUMP_LABEL (p)) != 0
2855 && any_condjump_p (p) && onlyjump_p (p)
2856 && next_real_insn (JUMP_LABEL (p)) == our_next
2857 /* If it's not safe to move the sequence, then we
2859 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2860 &last_insn_to_move))
2863 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2864 struct loop *target_loop = uid_loop[INSN_UID (target)];
2868 /* Search for possible garbage past the conditional jumps
2869 and look for the last barrier. */
2870 for (tmp = last_insn_to_move;
2871 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2872 if (GET_CODE (tmp) == BARRIER)
2873 last_insn_to_move = tmp;
2875 for (loc = target; loc; loc = PREV_INSN (loc))
2876 if (GET_CODE (loc) == BARRIER
2877 /* Don't move things inside a tablejump. */
2878 && ((loc2 = next_nonnote_insn (loc)) == 0
2879 || GET_CODE (loc2) != CODE_LABEL
2880 || (loc2 = next_nonnote_insn (loc2)) == 0
2881 || GET_CODE (loc2) != JUMP_INSN
2882 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2883 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2884 && uid_loop[INSN_UID (loc)] == target_loop)
2888 for (loc = target; loc; loc = NEXT_INSN (loc))
2889 if (GET_CODE (loc) == BARRIER
2890 /* Don't move things inside a tablejump. */
2891 && ((loc2 = next_nonnote_insn (loc)) == 0
2892 || GET_CODE (loc2) != CODE_LABEL
2893 || (loc2 = next_nonnote_insn (loc2)) == 0
2894 || GET_CODE (loc2) != JUMP_INSN
2895 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2896 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2897 && uid_loop[INSN_UID (loc)] == target_loop)
2902 rtx cond_label = JUMP_LABEL (p);
2903 rtx new_label = get_label_after (p);
2905 /* Ensure our label doesn't go away. */
2906 LABEL_NUSES (cond_label)++;
2908 /* Verify that uid_loop is large enough and that
2910 if (invert_jump (p, new_label, 1))
2914 /* If no suitable BARRIER was found, create a suitable
2915 one before TARGET. Since TARGET is a fall through
2916 path, we'll need to insert a jump around our block
2917 and add a BARRIER before TARGET.
2919 This creates an extra unconditional jump outside
2920 the loop. However, the benefits of removing rarely
2921 executed instructions from inside the loop usually
2922 outweigh the cost of the extra unconditional jump
2923 outside the loop. */
2928 temp = gen_jump (JUMP_LABEL (insn));
2929 temp = emit_jump_insn_before (temp, target);
2930 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2931 LABEL_NUSES (JUMP_LABEL (insn))++;
2932 loc = emit_barrier_before (target);
2935 /* Include the BARRIER after INSN and copy the
2937 if (squeeze_notes (&new_label, &last_insn_to_move))
2939 reorder_insns (new_label, last_insn_to_move, loc);
2941 /* All those insns are now in TARGET_LOOP. */
2943 q != NEXT_INSN (last_insn_to_move);
2945 uid_loop[INSN_UID (q)] = target_loop;
2947 /* The label jumped to by INSN is no longer a loop
2948 exit. Unless INSN does not have a label (e.g.,
2949 it is a RETURN insn), search loop->exit_labels
2950 to find its label_ref, and remove it. Also turn
2951 off LABEL_OUTSIDE_LOOP_P bit. */
2952 if (JUMP_LABEL (insn))
2954 for (q = 0, r = this_loop->exit_labels;
2956 q = r, r = LABEL_NEXTREF (r))
2957 if (XEXP (r, 0) == JUMP_LABEL (insn))
2959 LABEL_OUTSIDE_LOOP_P (r) = 0;
2961 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2963 this_loop->exit_labels = LABEL_NEXTREF (r);
2967 for (loop = this_loop; loop && loop != target_loop;
2971 /* If we didn't find it, then something is
2977 /* P is now a jump outside the loop, so it must be put
2978 in loop->exit_labels, and marked as such.
2979 The easiest way to do this is to just call
2980 mark_loop_jump again for P. */
2981 mark_loop_jump (PATTERN (p), this_loop);
2983 /* If INSN now jumps to the insn after it,
2985 if (JUMP_LABEL (insn) != 0
2986 && (next_real_insn (JUMP_LABEL (insn))
2987 == next_real_insn (insn)))
2988 delete_related_insns (insn);
2991 /* Continue the loop after where the conditional
2992 branch used to jump, since the only branch insn
2993 in the block (if it still remains) is an inter-loop
2994 branch and hence needs no processing. */
2995 insn = NEXT_INSN (cond_label);
2997 if (--LABEL_NUSES (cond_label) == 0)
2998 delete_related_insns (cond_label);
3000 /* This loop will be continued with NEXT_INSN (insn). */
3001 insn = PREV_INSN (insn);
3008 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3009 loops it is contained in, mark the target loop invalid.
3011 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3014 mark_loop_jump (rtx x, struct loop *loop)
3016 struct loop *dest_loop;
3017 struct loop *outer_loop;
3020 switch (GET_CODE (x))
3033 /* There could be a label reference in here. */
3034 mark_loop_jump (XEXP (x, 0), loop);
3040 mark_loop_jump (XEXP (x, 0), loop);
3041 mark_loop_jump (XEXP (x, 1), loop);
3045 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3046 mark_loop_jump (XEXP (x, 1), loop);
3051 mark_loop_jump (XEXP (x, 0), loop);
3055 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3057 /* Link together all labels that branch outside the loop. This
3058 is used by final_[bg]iv_value and the loop unrolling code. Also
3059 mark this LABEL_REF so we know that this branch should predict
3062 /* A check to make sure the label is not in an inner nested loop,
3063 since this does not count as a loop exit. */
3066 for (outer_loop = dest_loop; outer_loop;
3067 outer_loop = outer_loop->outer)
3068 if (outer_loop == loop)
3074 if (loop && ! outer_loop)
3076 LABEL_OUTSIDE_LOOP_P (x) = 1;
3077 LABEL_NEXTREF (x) = loop->exit_labels;
3078 loop->exit_labels = x;
3080 for (outer_loop = loop;
3081 outer_loop && outer_loop != dest_loop;
3082 outer_loop = outer_loop->outer)
3083 outer_loop->exit_count++;
3086 /* If this is inside a loop, but not in the current loop or one enclosed
3087 by it, it invalidates at least one loop. */
3092 /* We must invalidate every nested loop containing the target of this
3093 label, except those that also contain the jump insn. */
3095 for (; dest_loop; dest_loop = dest_loop->outer)
3097 /* Stop when we reach a loop that also contains the jump insn. */
3098 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3099 if (dest_loop == outer_loop)
3102 /* If we get here, we know we need to invalidate a loop. */
3103 if (loop_dump_stream && ! dest_loop->invalid)
3104 fprintf (loop_dump_stream,
3105 "\nLoop at %d ignored due to multiple entry points.\n",
3106 INSN_UID (dest_loop->start));
3108 dest_loop->invalid = 1;
3113 /* If this is not setting pc, ignore. */
3114 if (SET_DEST (x) == pc_rtx)
3115 mark_loop_jump (SET_SRC (x), loop);
3119 mark_loop_jump (XEXP (x, 1), loop);
3120 mark_loop_jump (XEXP (x, 2), loop);
3125 for (i = 0; i < XVECLEN (x, 0); i++)
3126 mark_loop_jump (XVECEXP (x, 0, i), loop);
3130 for (i = 0; i < XVECLEN (x, 1); i++)
3131 mark_loop_jump (XVECEXP (x, 1, i), loop);
3135 /* Strictly speaking this is not a jump into the loop, only a possible
3136 jump out of the loop. However, we have no way to link the destination
3137 of this jump onto the list of exit labels. To be safe we mark this
3138 loop and any containing loops as invalid. */
3141 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3143 if (loop_dump_stream && ! outer_loop->invalid)
3144 fprintf (loop_dump_stream,
3145 "\nLoop at %d ignored due to unknown exit jump.\n",
3146 INSN_UID (outer_loop->start));
3147 outer_loop->invalid = 1;
3154 /* Return nonzero if there is a label in the range from
3155 insn INSN to and including the insn whose luid is END.
3156 INSN must have an assigned luid (i.e., it must not have
3157 been previously created by loop.c). */
3160 labels_in_range_p (rtx insn, int end)
3162 while (insn && INSN_LUID (insn) <= end)
3164 if (GET_CODE (insn) == CODE_LABEL)
3166 insn = NEXT_INSN (insn);
3172 /* Record that a memory reference X is being set. */
3175 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3176 void *data ATTRIBUTE_UNUSED)
3178 struct loop_info *loop_info = data;
3180 if (x == 0 || GET_CODE (x) != MEM)
3183 /* Count number of memory writes.
3184 This affects heuristics in strength_reduce. */
3185 loop_info->num_mem_sets++;
3187 /* BLKmode MEM means all memory is clobbered. */
3188 if (GET_MODE (x) == BLKmode)
3190 if (RTX_UNCHANGING_P (x))
3191 loop_info->unknown_constant_address_altered = 1;
3193 loop_info->unknown_address_altered = 1;
3198 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3199 loop_info->store_mems);
3202 /* X is a value modified by an INSN that references a biv inside a loop
3203 exit test (i.e., X is somehow related to the value of the biv). If X
3204 is a pseudo that is used more than once, then the biv is (effectively)
3205 used more than once. DATA is a pointer to a loop_regs structure. */
3208 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3210 struct loop_regs *regs = (struct loop_regs *) data;
3215 while (GET_CODE (x) == STRICT_LOW_PART
3216 || GET_CODE (x) == SIGN_EXTRACT
3217 || GET_CODE (x) == ZERO_EXTRACT
3218 || GET_CODE (x) == SUBREG)
3221 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3224 /* If we do not have usage information, or if we know the register
3225 is used more than once, note that fact for check_dbra_loop. */
3226 if (REGNO (x) >= max_reg_before_loop
3227 || ! regs->array[REGNO (x)].single_usage
3228 || regs->array[REGNO (x)].single_usage == const0_rtx)
3229 regs->multiple_uses = 1;
3232 /* Return nonzero if the rtx X is invariant over the current loop.
3234 The value is 2 if we refer to something only conditionally invariant.
3236 A memory ref is invariant if it is not volatile and does not conflict
3237 with anything stored in `loop_info->store_mems'. */
3240 loop_invariant_p (const struct loop *loop, rtx x)
3242 struct loop_info *loop_info = LOOP_INFO (loop);
3243 struct loop_regs *regs = LOOP_REGS (loop);
3247 int conditional = 0;
3252 code = GET_CODE (x);
3262 /* A LABEL_REF is normally invariant, however, if we are unrolling
3263 loops, and this label is inside the loop, then it isn't invariant.
3264 This is because each unrolled copy of the loop body will have
3265 a copy of this label. If this was invariant, then an insn loading
3266 the address of this label into a register might get moved outside
3267 the loop, and then each loop body would end up using the same label.
3269 We don't know the loop bounds here though, so just fail for all
3271 if (flag_old_unroll_loops)
3278 case UNSPEC_VOLATILE:
3282 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3283 since the reg might be set by initialization within the loop. */
3285 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3286 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3287 && ! current_function_has_nonlocal_goto)
3290 if (LOOP_INFO (loop)->has_call
3291 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3294 /* Out-of-range regs can occur when we are called from unrolling.
3295 These have always been created by the unroller and are set in
3296 the loop, hence are never invariant. */
3298 if (REGNO (x) >= (unsigned) regs->num)
3301 if (regs->array[REGNO (x)].set_in_loop < 0)
3304 return regs->array[REGNO (x)].set_in_loop == 0;
3307 /* Volatile memory references must be rejected. Do this before
3308 checking for read-only items, so that volatile read-only items
3309 will be rejected also. */
3310 if (MEM_VOLATILE_P (x))
3313 /* See if there is any dependence between a store and this load. */
3314 mem_list_entry = loop_info->store_mems;
3315 while (mem_list_entry)
3317 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3321 mem_list_entry = XEXP (mem_list_entry, 1);
3324 /* It's not invalidated by a store in memory
3325 but we must still verify the address is invariant. */
3329 /* Don't mess with insns declared volatile. */
3330 if (MEM_VOLATILE_P (x))
3338 fmt = GET_RTX_FORMAT (code);
3339 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3343 int tem = loop_invariant_p (loop, XEXP (x, i));
3349 else if (fmt[i] == 'E')
3352 for (j = 0; j < XVECLEN (x, i); j++)
3354 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3364 return 1 + conditional;
3367 /* Return nonzero if all the insns in the loop that set REG
3368 are INSN and the immediately following insns,
3369 and if each of those insns sets REG in an invariant way
3370 (not counting uses of REG in them).
3372 The value is 2 if some of these insns are only conditionally invariant.
3374 We assume that INSN itself is the first set of REG
3375 and that its source is invariant. */
3378 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3381 struct loop_regs *regs = LOOP_REGS (loop);
3383 unsigned int regno = REGNO (reg);
3385 /* Number of sets we have to insist on finding after INSN. */
3386 int count = n_sets - 1;
3387 int old = regs->array[regno].set_in_loop;
3391 /* If N_SETS hit the limit, we can't rely on its value. */
3395 regs->array[regno].set_in_loop = 0;
3403 code = GET_CODE (p);
3405 /* If library call, skip to end of it. */
3406 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3411 && (set = single_set (p))
3412 && GET_CODE (SET_DEST (set)) == REG
3413 && REGNO (SET_DEST (set)) == regno)
3415 this = loop_invariant_p (loop, SET_SRC (set));
3418 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3420 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3421 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3423 this = (CONSTANT_P (XEXP (temp, 0))
3424 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3425 && loop_invariant_p (loop, XEXP (temp, 0))));
3432 else if (code != NOTE)
3434 regs->array[regno].set_in_loop = old;
3439 regs->array[regno].set_in_loop = old;
3440 /* If loop_invariant_p ever returned 2, we return 2. */
3441 return 1 + (value & 2);
3445 /* I don't think this condition is sufficient to allow INSN
3446 to be moved, so we no longer test it. */
3448 /* Return 1 if all insns in the basic block of INSN and following INSN
3449 that set REG are invariant according to TABLE. */
3452 all_sets_invariant_p (rtx reg, rtx insn, short *table)
3455 int regno = REGNO (reg);
3461 code = GET_CODE (p);
3462 if (code == CODE_LABEL || code == JUMP_INSN)
3464 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3465 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3466 && REGNO (SET_DEST (PATTERN (p))) == regno)
3468 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3475 /* Look at all uses (not sets) of registers in X. For each, if it is
3476 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3477 a different insn, set USAGE[REGNO] to const0_rtx. */
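/* Thus, after the whole loop body has been scanned,
   regs->array[R].single_usage is the unique insn using register R,
   const0_rtx if R is used by more than one insn, and 0 if R is not
   used at all.  */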
3480 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3482 enum rtx_code code = GET_CODE (x);
3483 const char *fmt = GET_RTX_FORMAT (code);
3487 regs->array[REGNO (x)].single_usage
3488 = (regs->array[REGNO (x)].single_usage != 0
3489 && regs->array[REGNO (x)].single_usage != insn)
3490 ? const0_rtx : insn;
3492 else if (code == SET)
3494 /* Don't count SET_DEST if it is a REG; otherwise count things
3495 in SET_DEST because if a register is partially modified, it won't
3496 show up as a potential movable so we don't care how USAGE is set
3498 if (GET_CODE (SET_DEST (x)) != REG)
3499 find_single_use_in_loop (regs, insn, SET_DEST (x));
3500 find_single_use_in_loop (regs, insn, SET_SRC (x));
3503 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3505 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3506 find_single_use_in_loop (regs, insn, XEXP (x, i));
3507 else if (fmt[i] == 'E')
3508 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3509 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3513 /* Count and record any set in X which is contained in INSN. Update
3514 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3518 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3520 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3521 /* Don't move a reg that has an explicit clobber.
3522 It's not worth the pain to try to do it correctly. */
3523 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3525 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3527 rtx dest = SET_DEST (x);
3528 while (GET_CODE (dest) == SUBREG
3529 || GET_CODE (dest) == ZERO_EXTRACT
3530 || GET_CODE (dest) == SIGN_EXTRACT
3531 || GET_CODE (dest) == STRICT_LOW_PART)
3532 dest = XEXP (dest, 0);
3533 if (GET_CODE (dest) == REG)
3536 int regno = REGNO (dest);
3537 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3539 /* If this is the first setting of this reg
3540 in the current basic block, and it was set before,
3541 it must be set in two basic blocks, so it cannot
3542 be moved out of the loop. */
3543 if (regs->array[regno].set_in_loop > 0
3544 && last_set[regno] == 0)
3545 regs->array[regno+i].may_not_optimize = 1;
3546 /* If this is not the first setting in the current basic block,
3547 see if the reg was used between the previous one and this one.
3548 If so, neither one can be moved. */
3549 if (last_set[regno] != 0
3550 && reg_used_between_p (dest, last_set[regno], insn))
3551 regs->array[regno+i].may_not_optimize = 1;
3552 if (regs->array[regno+i].set_in_loop < 127)
3553 ++regs->array[regno+i].set_in_loop;
3554 last_set[regno+i] = insn;
3560 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3561 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3562 contained in insn INSN is used by any insn that precedes INSN in
3563 cyclic order starting from the loop entry point.
3565 We don't want to use INSN_LUID here because if we restrict INSN to those
3566 that have a valid INSN_LUID, it means we cannot move an invariant out
3567 from an inner loop past two loops. */
3570 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3572 rtx reg = SET_DEST (set);
3575 /* Scan forward checking for register usage. If we hit INSN, we
3576 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3577 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3579 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3590 /* Information we collect about arrays that we might want to prefetch. */
3591 struct prefetch_info
3593 struct iv_class *class; /* Class this prefetch is based on. */
3594 struct induction *giv; /* GIV this prefetch is based on. */
3595 rtx base_address; /* Start prefetching from this address plus
3597 HOST_WIDE_INT index;
3598 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3600 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3601 prefetch area in one iteration. */
3602 unsigned int total_bytes; /* Total bytes loop will access in this block.
3603 This is set only for loops with known
3604 iteration counts and is 0xffffffff
3606 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3607 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3608 unsigned int write : 1; /* 1 for read/write prefetches. */
3611 /* Data used by check_store function. */
3612 struct check_store_data
3618 static void check_store (rtx, rtx, void *);
3619 static void emit_prefetch_instructions (struct loop *);
3620 static int rtx_equal_for_prefetch_p (rtx, rtx);
3622 /* Set mem_write when mem_address is found. Used as callback to
3625 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3627 struct check_store_data *d = (struct check_store_data *) data;
3629 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3633 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3634 important to get some addresses combined. Later more sophisticated
3635 transformations can be added when necessary.
3637 ??? The same trick of swapping operands is done at several other places.
3638 It might be nice to develop some common way to handle this. */
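/* For instance, (plus (reg 100) (reg 101)) and
   (plus (reg 101) (reg 100)) compare equal here, although plain
   rtx_equal_p would reject them.  */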
3641 rtx_equal_for_prefetch_p (rtx x, rtx y)
3645 enum rtx_code code = GET_CODE (x);
3650 if (code != GET_CODE (y))
3653 code = GET_CODE (x);
3655 if (GET_RTX_CLASS (code) == 'c')
3657 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3658 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3659 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3660 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3662 /* Compare the elements. If any pair of corresponding elements fails to
3663 match, return 0 for the whole thing. */
3665 fmt = GET_RTX_FORMAT (code);
3666 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3671 if (XWINT (x, i) != XWINT (y, i))
3676 if (XINT (x, i) != XINT (y, i))
3681 /* Two vectors must have the same length. */
3682 if (XVECLEN (x, i) != XVECLEN (y, i))
3685 /* And the corresponding elements must match. */
3686 for (j = 0; j < XVECLEN (x, i); j++)
3687 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3688 XVECEXP (y, i, j)) == 0)
3693 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3698 if (strcmp (XSTR (x, i), XSTR (y, i)))
3703 /* These are just backpointers, so they don't matter. */
3709 /* It is believed that rtx's at this level will never
3710 contain anything but integers and other rtx's,
3711 except for within LABEL_REFs and SYMBOL_REFs. */
3719 /* Remove constant addition value from the expression X (when present)
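and return it.  (Illustrative example: given (plus (reg 100)
(const_int 8)), the function stores (reg 100) through *X and
returns 8.)  */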
3722 static HOST_WIDE_INT
3723 remove_constant_addition (rtx *x)
3725 HOST_WIDE_INT addval = 0;
3728 /* Avoid clobbering a shared CONST expression. */
3729 if (GET_CODE (exp) == CONST)
3731 if (GET_CODE (XEXP (exp, 0)) == PLUS
3732 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3733 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3735 *x = XEXP (XEXP (exp, 0), 0);
3736 return INTVAL (XEXP (XEXP (exp, 0), 1));
3741 if (GET_CODE (exp) == CONST_INT)
3743 addval = INTVAL (exp);
3747 /* For a PLUS expression, recurse on its operands. */
3748 else if (GET_CODE (exp) == PLUS)
3750 addval += remove_constant_addition (&XEXP (exp, 0));
3751 addval += remove_constant_addition (&XEXP (exp, 1));
3753 /* In case our parameter was constant, remove extra zero from the
3755 if (XEXP (exp, 0) == const0_rtx)
3757 else if (XEXP (exp, 1) == const0_rtx)
3764 /* Attempt to identify accesses to arrays that are most likely to cause cache
3765 misses, and emit prefetch instructions a few prefetch blocks forward.
3767 To detect the arrays we use the GIV information that was collected by the
3768 strength reduction pass.
3770 The prefetch instructions are generated after the GIV information is done
3771 and before the strength reduction process. The new GIVs are injected into
3772 the strength reduction tables, so the prefetch addresses are optimized as
3775 GIVs are split into base address, stride, and constant addition values.
3776 GIVs with the same address, stride and close addition values are combined
3777 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3778 for write instructions can be used for the block we write to, on machines
3779 that support write prefetches.
3781 Several heuristics are used to determine when to prefetch. They are
3782 controlled by defined symbols that can be overridden for each target. */
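/* As an illustration (hypothetical loop, not from the sources): a loop
   reading a[4*i] and a[4*i + 1] yields two DEST_ADDR givs with the same
   base address and the same stride of 4 * sizeof (*a); they differ only
   in the constant addition, so the merging step below folds them into a
   single prefetch stream.  */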
3785 emit_prefetch_instructions (struct loop *loop)
3787 int num_prefetches = 0;
3788 int num_real_prefetches = 0;
3789 int num_real_write_prefetches = 0;
3790 int num_prefetches_before = 0;
3791 int num_write_prefetches_before = 0;
3794 struct iv_class *bl;
3795 struct induction *iv;
3796 struct prefetch_info info[MAX_PREFETCHES];
3797 struct loop_ivs *ivs = LOOP_IVS (loop);
3802 /* Consider only loops without calls. When a call is present, the loop is
3803 probably slow enough already to hide the memory latency. */
3804 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3806 if (loop_dump_stream)
3807 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3812 /* Don't prefetch in loops known to have few iterations. */
3813 if (PREFETCH_NO_LOW_LOOPCNT
3814 && LOOP_INFO (loop)->n_iterations
3815 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3817 if (loop_dump_stream)
3818 fprintf (loop_dump_stream,
3819 "Prefetch: ignoring loop: not enough iterations.\n");
3823 /* Search all induction variables and pick those interesting for the prefetch
3825 for (bl = ivs->list; bl; bl = bl->next)
3827 struct induction *biv = bl->biv, *biv1;
3832 /* Expect all BIVs to be executed in each iteration. This makes our
3833 analysis more conservative. */
3836 /* Discard non-constant additions that we can't handle well yet, and
3837 BIVs that are executed multiple times; such BIVs ought to be
3838 handled in the nested loop. We accept not_every_iteration BIVs,
3839 since these only result in larger strides and make our
3840 heuristics more conservative. */
3841 if (GET_CODE (biv->add_val) != CONST_INT)
3843 if (loop_dump_stream)
3845 fprintf (loop_dump_stream,
3846 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3847 REGNO (biv->src_reg), INSN_UID (biv->insn));
3848 print_rtl (loop_dump_stream, biv->add_val);
3849 fprintf (loop_dump_stream, "\n");
3854 if (biv->maybe_multiple)
3856 if (loop_dump_stream)
3858 fprintf (loop_dump_stream,
3859 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3860 REGNO (biv->src_reg), INSN_UID (biv->insn));
3861 print_rtl (loop_dump_stream, biv->add_val);
3862 fprintf (loop_dump_stream, "\n");
3867 basestride += INTVAL (biv1->add_val);
3868 biv1 = biv1->next_iv;
3871 if (biv1 || !basestride)
3874 for (iv = bl->giv; iv; iv = iv->next_iv)
3878 HOST_WIDE_INT index = 0;
3880 HOST_WIDE_INT stride = 0;
3881 int stride_sign = 1;
3882 struct check_store_data d;
3883 const char *ignore_reason = NULL;
3884 int size = GET_MODE_SIZE (GET_MODE (iv));
3886 /* See whether an induction variable is interesting to us and if
3887 not, report the reason. */
3888 if (iv->giv_type != DEST_ADDR)
3889 ignore_reason = "giv is not a destination address";
3891 /* We are interested only in constant stride memory references
3892 in order to be able to compute density easily. */
3893 else if (GET_CODE (iv->mult_val) != CONST_INT)
3894 ignore_reason = "stride is not constant";
3898 stride = INTVAL (iv->mult_val) * basestride;
3905 /* On some targets, reversed order prefetches are not
3907 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3908 ignore_reason = "reversed order stride";
3910 /* Prefetch of accesses with an extreme stride might not be
3911 worthwhile, either. */
3912 else if (PREFETCH_NO_EXTREME_STRIDE
3913 && stride > PREFETCH_EXTREME_STRIDE)
3914 ignore_reason = "extreme stride";
3916 /* Ignore GIVs with varying add values; we can't predict the
3917 value for the next iteration. */
3918 else if (!loop_invariant_p (loop, iv->add_val))
3919 ignore_reason = "giv has varying add value";
3921 /* Ignore GIVs in the nested loops; they ought to have been
3923 else if (iv->maybe_multiple)
3924 ignore_reason = "giv is in nested loop";
3927 if (ignore_reason != NULL)
3929 if (loop_dump_stream)
3930 fprintf (loop_dump_stream,
3931 "Prefetch: ignoring giv at %d: %s.\n",
3932 INSN_UID (iv->insn), ignore_reason);
3936 /* Determine the pointer to the basic array we are examining. It is
3937 the sum of the BIV's initial value and the GIV's add_val. */
3938 address = copy_rtx (iv->add_val);
3939 temp = copy_rtx (bl->initial_value);
3941 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3942 index = remove_constant_addition (&address);
3945 d.mem_address = *iv->location;
3947 /* When the GIV is not always executed, we might be better off
3948 not dirtying the cache pages. */
3949 if (PREFETCH_CONDITIONAL || iv->always_executed)
3950 note_stores (PATTERN (iv->insn), check_store, &d);
3953 if (loop_dump_stream)
3954 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3955 INSN_UID (iv->insn), "in conditional code.");
3959 /* Attempt to find another prefetch to the same array and see if we
3960 can merge this one. */
3961 for (i = 0; i < num_prefetches; i++)
3962 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3963 && stride == info[i].stride)
3965 /* If both access the same array (the same location,
3966 just with a small difference in the constant indexes), merge
3967 the prefetches. Issue only the later one; the earlier one will
3968 be prefetched by a previous iteration.
3969 The artificial threshold should not be too small,
3970 but also not bigger than the small portion of memory usually
3971 traversed by a single loop. */
3972 if (index >= info[i].index
3973 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3975 info[i].write |= d.mem_write;
3976 info[i].bytes_accessed += size;
3977 info[i].index = index;
3980 info[num_prefetches].base_address = address;
3985 if (index < info[i].index
3986 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3988 info[i].write |= d.mem_write;
3989 info[i].bytes_accessed += size;
3995 /* Merging failed. */
3998 info[num_prefetches].giv = iv;
3999 info[num_prefetches].class = bl;
4000 info[num_prefetches].index = index;
4001 info[num_prefetches].stride = stride;
4002 info[num_prefetches].base_address = address;
4003 info[num_prefetches].write = d.mem_write;
4004 info[num_prefetches].bytes_accessed = size;
4006 if (num_prefetches >= MAX_PREFETCHES)
4008 if (loop_dump_stream)
4009 fprintf (loop_dump_stream,
4010 "Maximal number of prefetches exceeded.\n");
4017 for (i = 0; i < num_prefetches; i++)
4021 /* Attempt to calculate the total number of bytes fetched by all
4022 iterations of the loop. Avoid overflow. */
4023 if (LOOP_INFO (loop)->n_iterations
4024 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4025 >= LOOP_INFO (loop)->n_iterations))
4026 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4028 info[i].total_bytes = 0xffffffff;
4030 density = info[i].bytes_accessed * 100 / info[i].stride;
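/* E.g., accessing 8 bytes out of every 32-byte stride gives a density
   of 25.  With the default PREFETCH_DENSE_MEM of 220, the test below
   requires density * 256 > 22000, i.e. roughly 86% of the bytes in
   each stride must be touched for the stream to count as dense.  */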
4032 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4033 if (PREFETCH_ONLY_DENSE_MEM)
4034 if (density * 256 > PREFETCH_DENSE_MEM * 100
4035 && (info[i].total_bytes / PREFETCH_BLOCK
4036 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4038 info[i].prefetch_before_loop = 1;
4039 info[i].prefetch_in_loop
4040 = (info[i].total_bytes / PREFETCH_BLOCK
4041 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4045 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4046 if (loop_dump_stream)
4047 fprintf (loop_dump_stream,
4048 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4049 INSN_UID (info[i].giv->insn), density);
4052 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4054 /* Find how many prefetch instructions we'll use within the loop. */
4055 if (info[i].prefetch_in_loop != 0)
4057 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4059 num_real_prefetches += info[i].prefetch_in_loop;
4061 num_real_write_prefetches += info[i].prefetch_in_loop;
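/* E.g., a 96-byte stride with the default 32-byte PREFETCH_BLOCK
   needs ceil (96 / 32) = 3 prefetch insns per iteration.  */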
4065 /* Determine how many iterations ahead to prefetch within the loop, based
4066 on how many prefetches we currently expect to do within the loop. */
4067 if (num_real_prefetches != 0)
4069 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4071 if (loop_dump_stream)
4072 fprintf (loop_dump_stream,
4073 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4074 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4075 num_real_prefetches = 0, num_real_write_prefetches = 0;
4078 /* We'll also use AHEAD to determine how many prefetch instructions to
4079 emit before a loop, so don't leave it zero. */
4081 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
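/* E.g., with SIMULTANEOUS_PREFETCHES of 3 (the fallback default) and
   two prefetch streams, AHEAD is 3 / 2 = 1 iteration; four streams
   would give 0, disabling prefetching within the loop altogether.  */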
  for (i = 0; i < num_prefetches; i++)
      /* Update if we've decided not to prefetch anything within the loop.  */
      if (num_real_prefetches == 0)
        info[i].prefetch_in_loop = 0;

      /* Find how many prefetch instructions we'll use before the loop.  */
      if (info[i].prefetch_before_loop != 0)
          int n = info[i].total_bytes / PREFETCH_BLOCK;
          info[i].prefetch_before_loop = n;
          num_prefetches_before += n;
            num_write_prefetches_before += n;

      if (loop_dump_stream)
          if (info[i].prefetch_in_loop == 0
              && info[i].prefetch_before_loop == 0)
          fprintf (loop_dump_stream, "Prefetch insn: %d",
                   INSN_UID (info[i].giv->insn));
          fprintf (loop_dump_stream,
                   "; in loop: %d; before: %d; %s\n",
                   info[i].prefetch_in_loop,
                   info[i].prefetch_before_loop,
                   info[i].write ? "read/write" : "read only");
          fprintf (loop_dump_stream,
                   " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
                   (int) (info[i].bytes_accessed * 100 / info[i].stride),
                   info[i].bytes_accessed, info[i].total_bytes);
          fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
                   "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
                   info[i].index, info[i].stride);
          print_rtl (loop_dump_stream, info[i].base_address);
          fprintf (loop_dump_stream, "\n");

  if (num_real_prefetches + num_prefetches_before > 0)
      /* Record that this loop uses prefetch instructions.  */
      LOOP_INFO (loop)->has_prefetch = 1;

      if (loop_dump_stream)
          fprintf (loop_dump_stream,
                   "Real prefetches needed within loop: %d (write: %d)\n",
                   num_real_prefetches, num_real_write_prefetches);
          fprintf (loop_dump_stream,
                   "Real prefetches needed before loop: %d (write: %d)\n",
                   num_prefetches_before, num_write_prefetches_before);

  for (i = 0; i < num_prefetches; i++)
      for (y = 0; y < info[i].prefetch_in_loop; y++)
          rtx loc = copy_rtx (*info[i].giv->location);
          int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
          rtx before_insn = info[i].giv->insn;
          rtx prev_insn = PREV_INSN (info[i].giv->insn);

          /* We can save some effort by offsetting the address on
             architectures with offsettable memory references.  */
          if (offsettable_address_p (0, VOIDmode, loc))
            loc = plus_constant (loc, bytes_ahead);

              rtx reg = gen_reg_rtx (Pmode);
              loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
                                            GEN_INT (bytes_ahead), reg,

          /* Make sure the address operand is valid for prefetch.  */
          if (! (*insn_data[(int) CODE_FOR_prefetch].operand[0].predicate)
                (loc, insn_data[(int) CODE_FOR_prefetch].operand[0].mode))
            loc = force_reg (Pmode, loc);
          emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),

          emit_insn_before (seq, before_insn);

          /* Check all insns emitted and record the new GIV
             information.  */
          insn = NEXT_INSN (prev_insn);
          while (insn != before_insn)
              insn = check_insn_for_givs (loop, insn,
                                          info[i].giv->always_executed,
                                          info[i].giv->maybe_multiple);
              insn = NEXT_INSN (insn);

      if (PREFETCH_BEFORE_LOOP)
          /* Emit insns before the loop to fetch the first cache lines or,
             if we're not prefetching within the loop, everything we expect
             to need.  */
          for (y = 0; y < info[i].prefetch_before_loop; y++)
              rtx reg = gen_reg_rtx (Pmode);
              rtx loop_start = loop->start;
              rtx init_val = info[i].class->initial_value;
              rtx add_val = simplify_gen_binary (PLUS, Pmode,
                                                 info[i].giv->add_val,
                                                 GEN_INT (y * PREFETCH_BLOCK));

              /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
                 non-constant INIT_VAL to have the same mode as REG, which
                 in this case we know to be Pmode.  */
              if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
                  init_val = convert_to_mode (Pmode, init_val, 0);
                  loop_insn_emit_before (loop, 0, loop_start, seq);
              loop_iv_add_mult_emit_before (loop, init_val,
                                            info[i].giv->mult_val,
                                            add_val, reg, 0, loop_start);
              emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4228 /* A "basic induction variable" or biv is a pseudo reg that is set
4229 (within this loop) only by incrementing or decrementing it. */
4230 /* A "general induction variable" or giv is a pseudo reg whose
4231 value is a linear function of a biv. */
4233 /* Bivs are recognized by `basic_induction_var';
4234 Givs by `general_induction_var'. */
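/* An illustrative (hypothetical) example of the distinction: in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter `i' is a biv, since the loop changes it only by
   incrementing it.  The address computation `a + i * 4' (assuming
   4-byte array elements) is a giv: a linear function of the biv `i'.  */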
/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;

/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Scan the loop body and call FNCALL for each insn.  In addition to the
   LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
   the callback.

   NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed
   at least once for every loop iteration except for the last one.

   MAYBE_MULTIPLE is 1 if the current insn may be executed more than once
   for every loop iteration.  */
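/* For reference, FNCALL is expected to match the loop_insn_callback
   type used by check_insn_for_bivs and check_insn_for_givs, roughly:

       rtx callback (struct loop *loop, rtx insn,
                     int not_every_iteration, int maybe_multiple);

   returning the last insn it processed, so that scanning can resume
   after any insns the callback emitted.  */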
for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
  int not_every_iteration = 0;
  int maybe_multiple = 0;
  int past_loop_latch = 0;

  /* If loop_scan_start points to the loop exit test, we have to be wary of
     subversive use of gotos inside expression statements.  */
  if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
    maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);

  /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE.  */
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p = next_insn_in_loop (loop, p))
      p = fncall (loop, p, not_every_iteration, maybe_multiple);

      /* Past CODE_LABEL, we get to insns that may be executed multiple
         times.  The only way we can be sure that they can't is if every
         jump insn between here and the end of the loop either
         returns, exits the loop, is a jump to a location that is still
         behind the label, or is a jump to the loop start.  */

      if (GET_CODE (p) == CODE_LABEL
              insn = NEXT_INSN (insn);
              if (insn == loop->scan_start)
              if (insn == loop->end)
              if (insn == loop->scan_start)

              if (GET_CODE (insn) == JUMP_INSN
                  && GET_CODE (PATTERN (insn)) != RETURN
                  && (!any_condjump_p (insn)
                      || (JUMP_LABEL (insn) != 0
                          && JUMP_LABEL (insn) != loop->scan_start
                          && !loop_insn_first_p (p, JUMP_LABEL (insn)))))

      /* Past a jump, we get to insns for which we can't count
         on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
         code in scan_loop.  */
      if (GET_CODE (p) == JUMP_INSN
          /* If we enter the loop in the middle, and scan around to the
             beginning, don't set not_every_iteration for that.
             This can be any kind of jump, since we want to know if insns
             will be executed if the loop is executed.  */
          && !(JUMP_LABEL (p) == loop->top
               && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
                    && any_uncondjump_p (p))
                   || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))

          /* If this is a jump outside the loop, then it also doesn't
             matter.  Check to see if the target of this branch is on the
             loop->exit_labels list.  */

          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            if (XEXP (label, 0) == JUMP_LABEL (p))

            not_every_iteration = 1;

      else if (GET_CODE (p) == NOTE)
          /* At the virtual top of a converted loop, insns are again known to
             be executed each iteration: logically, the loop begins here
             even though the exit code has been duplicated.

             Insns are also again known to be executed each iteration at
             the LOOP_CONT note.  */
          if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
               || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
            not_every_iteration = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)

      /* Note if we pass a loop latch.  If we do, then we cannot clear
         NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
         a loop since a jump before the last CODE_LABEL may have started
         a new loop iteration.

         Note that LOOP_TOP is only set for rotated loops and we need
         this check for all loops, so compare against the CODE_LABEL
         which immediately follows LOOP_START.  */
      if (GET_CODE (p) == JUMP_INSN
          && JUMP_LABEL (p) == NEXT_INSN (loop->start))
        past_loop_latch = 1;

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
         an insn may never be executed, NOT_EVERY_ITERATION indicates whether
         or not an insn is known to be executed each iteration of the
         loop, whether or not any iterations are known to occur.

         Therefore, if we have just passed a label and have no more labels
         between here and the test insn of the loop, and we have not passed
         a jump to the top of the loop, then we know these insns will be
         executed each iteration.  */

      if (not_every_iteration
          && GET_CODE (p) == CODE_LABEL
          && no_labels_between_p (p, loop->end)
          && loop_insn_first_p (p, loop->cont))
        not_every_iteration = 0;
loop_bivs_find (struct loop *loop)
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl, **backbl;

  for_each_insn_in_loop (loop, check_insn_for_bivs);

  /* Scan ivs->list to remove all regs that proved not to be bivs.
     Make a sanity check against regs->n_times_set.  */
  for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
      if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
          /* Above happens if register modified by subreg, etc.  */
          /* Make sure it is not recognized as a basic induction var: */
          || regs->array[bl->regno].n_times_set != bl->biv_count
          /* If never incremented, it is invariant that we decided not to
             move.  So leave it alone.  */
          || ! bl->incremented)
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
                     (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
                      ? "not induction variable"
                      : (! bl->incremented ? "never incremented"

          REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
/* Determine how BIVS are initialized by looking through the pre-header
   extended basic block.  */
loop_bivs_init_find (struct loop *loop)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl;

  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
      if (GET_CODE (p) == CALL_INSN

      note_stores (PATTERN (p), record_initial, ivs);

      /* Record any test of a biv that branches around the loop if no store
         between it and the start of loop.  We only care about tests with
         constants and registers and only certain of those.  */
      if (GET_CODE (p) == JUMP_INSN
          && JUMP_LABEL (p) != 0
          && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
          && (test = get_condition_for_loop (loop, p)) != 0
          && GET_CODE (XEXP (test, 0)) == REG
          && REGNO (XEXP (test, 0)) < max_reg_before_loop
          && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
          && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
          && bl->init_insn == 0)
          /* If an NE test, we have an initial value!  */
          if (GET_CODE (test) == NE)
              bl->init_set = gen_rtx_SET (VOIDmode,
                                          XEXP (test, 0), XEXP (test, 1));
            bl->initial_test = test;
/* Look at each biv and see if we can say anything better about its
   initial value from any initializing insns set up above.  (This is done
   in two passes to avoid missing SETs in a PARALLEL.)  */
loop_bivs_check (struct loop *loop)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl;
  struct iv_class **backbl;

  for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
      if (! bl->init_insn)

      /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
         is a constant, use the value of that.  */
      if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
           && CONSTANT_P (XEXP (note, 0)))
          || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
              && CONSTANT_P (XEXP (note, 0))))
        src = XEXP (note, 0);
        src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Biv %d: initialized at insn %d: initial value ",
                 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
           || GET_MODE (src) == VOIDmode)
          && valid_initial_value_p (src, bl->init_insn,
                                    LOOP_INFO (loop)->pre_header_has_call,
          bl->initial_value = src;

          if (loop_dump_stream)
              print_simple_rtl (loop_dump_stream, src);
              fputc ('\n', loop_dump_stream);
      /* If we can't make it a giv,
         let biv keep initial value of "itself".  */
      else if (loop_dump_stream)
        fprintf (loop_dump_stream, "is complex\n");
/* Search the loop for general induction variables.  */

loop_givs_find (struct loop* loop)
  for_each_insn_in_loop (loop, check_insn_for_givs);

/* For each giv for which we still don't know whether or not it is
   replaceable, check to see if it is replaceable because its final value
   can be calculated.  */

loop_givs_check (struct loop *loop)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;

  for (bl = ivs->list; bl; bl = bl->next)
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
        if (! v->replaceable && ! v->not_replaceable)
          check_final_value (loop, v);
/* Return nonzero if it is possible to eliminate the biv BL provided
   all givs are reduced.  This is possible if either the reg is not
   used outside the loop, or we can compute what its final value will
   be.  */

loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
                       int threshold, int insn_count)
  /* For architectures with a decrement_and_branch_until_zero insn,
     don't do this if we put a REG_NONNEG note on the endtest for this
     biv.  */

#ifdef HAVE_decrement_and_branch_until_zero
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Cannot eliminate nonneg biv %d.\n", bl->regno);

  /* Check that the biv is used outside the loop, or that it has a final
     value.  Compare against bl->init_insn rather than loop->start.  We
     aren't concerned with any uses of the biv between init_insn and
     loop->start since these won't be affected by the value of the biv
     elsewhere in the function, so long as init_insn doesn't use the
     biv itself.  */

  if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
       && INSN_UID (bl->init_insn) < max_uid_for_loop
       && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
       && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
      || (bl->final_value = final_biv_value (loop, bl)))
    return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);

  if (loop_dump_stream)
      fprintf (loop_dump_stream,
               "Cannot eliminate biv %d.\n",
      fprintf (loop_dump_stream,
               "First use: insn %d, last use: insn %d.\n",
               REGNO_FIRST_UID (bl->regno),
               REGNO_LAST_UID (bl->regno));
/* Reduce each giv of BL that we have decided to reduce.  */

loop_givs_reduce (struct loop *loop, struct iv_class *bl)
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
      struct induction *tv;
      if (! v->ignore && v->same == 0)
          int auto_inc_opt = 0;

          /* If the code for derived givs immediately below has already
             allocated a new_reg, we must keep it.  */
            v->new_reg = gen_reg_rtx (v->mode);

          /* If the target has auto-increment addressing modes, and
             this is an address giv, then try to put the increment
             immediately after its use, so that flow can create an
             auto-increment addressing mode.  */
          if (v->giv_type == DEST_ADDR && bl->biv_count == 1
              && bl->biv->always_executed && ! bl->biv->maybe_multiple
              /* We don't handle reversed bivs because bl->biv->insn
                 does not have a valid INSN_LUID.  */
              && v->always_executed && ! v->maybe_multiple
              && INSN_UID (v->insn) < max_uid_for_loop)
              /* If other givs have been combined with this one, then
                 this will work only if all uses of the other givs occur
                 before this giv's insn.  This is difficult to check.

                 We simplify this by looking for the common case where
                 there is one DEST_REG giv, and this giv's insn is the
                 last use of the dest_reg of that DEST_REG giv.  If the
                 increment occurs after the address giv, then we can
                 perform the optimization.  (Otherwise, the increment
                 would have to go before other_giv, and we would not be
                 able to combine it with the address giv to get an
                 auto-inc address.)  */
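/* A sketch of the case being targeted (illustrative register numbers):

       r9 = *r7;         <- memory reference through the address giv
       r7 = r7 + 4;      <- increment placed immediately after the use

   Placing the increment directly after the use lets flow later fold
   the pair into a single post-increment address, `r9 = *r7++'.  */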
          if (v->combined_with)
              struct induction *other_giv = 0;

              for (tv = bl->giv; tv; tv = tv->next_iv)

              if (! tv && other_giv
                  && REGNO (other_giv->dest_reg) < max_reg_before_loop
                  && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
                      == INSN_UID (v->insn))
                  && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))

          /* Check for case where increment is before the address
             giv.  Do this test in "loop order".  */
          else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
                    && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
                        || (INSN_LUID (bl->biv->insn)
                            > INSN_LUID (loop->scan_start))))
                   || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
                       && (INSN_LUID (loop->scan_start)
                           < INSN_LUID (bl->biv->insn))))

          /* We can't put an insn immediately after one setting
             cc0, or immediately before one using cc0.  */
          if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
              || (auto_inc_opt == -1
                  && (prev = prev_nonnote_insn (v->insn)) != 0
                  && sets_cc0_p (PATTERN (prev))))

            v->auto_inc_opt = 1;

          /* For each place where the biv is incremented, add an insn
             to increment the new, reduced reg for the giv.  */
          for (tv = bl->biv; tv; tv = tv->next_iv)
              /* Skip if location is the same as a previous one.  */
                insert_before = NEXT_INSN (tv->insn);
              else if (auto_inc_opt == 1)
                insert_before = NEXT_INSN (v->insn);
                insert_before = v->insn;

              if (tv->mult_val == const1_rtx)
                loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
                                              v->new_reg, v->new_reg,
              else /* tv->mult_val == const0_rtx */
                /* A multiply is acceptable here
                   since this is presumed to be seldom executed.  */
                loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
                                              v->add_val, v->new_reg,

          /* Add code at loop start to initialize giv's reduced reg.  */

          loop_iv_add_mult_hoist (loop,
                                  extend_value_for_giv (v, bl->initial_value),
                                  v->mult_val, v->add_val, v->new_reg);
/* Check for givs whose first use is their definition and whose
   last use is the definition of another giv.  If so, it is likely
   dead and should not be used to derive another giv nor to
   eliminate a biv.  */

loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
      || (v->same && v->same->ignore))

      if (v->giv_type == DEST_REG
          && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
          struct induction *v1;

          for (v1 = bl->giv; v1; v1 = v1->next_iv)
            if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
      if (v->same && v->same->ignore)

      /* Update expression if this was combined, in case other giv was
         replaced.  */
        v->new_reg = replace_rtx (v->new_reg,
                                  v->same->dest_reg, v->same->new_reg);

      /* See if this register is known to be a pointer to something.  If
         so, see if we can find the alignment.  First see if there is a
         destination register that is a pointer.  If so, this shares the
         alignment too.  Next see if we can deduce anything from the
         computational information.  If not, and this is a DEST_ADDR
         giv, at least we know that it's a pointer, though we don't know
         the alignment.  */
      if (GET_CODE (v->new_reg) == REG
          && v->giv_type == DEST_REG
          && REG_POINTER (v->dest_reg))
        mark_reg_pointer (v->new_reg,
                          REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
      else if (GET_CODE (v->new_reg) == REG
               && REG_POINTER (v->src_reg))
          unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));

              || GET_CODE (v->add_val) != CONST_INT
              || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)

          mark_reg_pointer (v->new_reg, align);
      else if (GET_CODE (v->new_reg) == REG
               && GET_CODE (v->add_val) == REG
               && REG_POINTER (v->add_val))
          unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));

          if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
              || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)

          mark_reg_pointer (v->new_reg, align);
      else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
        mark_reg_pointer (v->new_reg, 0);

      if (v->giv_type == DEST_ADDR)
        /* Store reduced reg as the address in the memref where we found
           it.  */
        validate_change (v->insn, v->location, v->new_reg, 0);
      else if (v->replaceable)
          reg_map[REGNO (v->dest_reg)] = v->new_reg;
          rtx original_insn = v->insn;

          /* Not replaceable; emit an insn to set the original giv reg from
             the reduced giv, same as above.  */
          v->insn = loop_insn_emit_after (loop, 0, original_insn,
                                          gen_move_insn (v->dest_reg,

          /* The original insn may have a REG_EQUAL note.  This note is
             now incorrect and may result in invalid substitutions later.
             The original insn is dead, but may be part of a libcall
             sequence, which doesn't seem worth the bother of handling.  */
          note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
            remove_note (original_insn, note);

      /* When a loop is reversed, givs which depend on the reversed
         biv, and which are live outside the loop, must be set to their
         correct final value.  This insn is only needed if the giv is
         not replaceable.  The correct final value is the same as the
         value that the giv starts the reversed loop with.  */
      if (bl->reversed && ! v->replaceable)
        loop_iv_add_mult_sink (loop,
                               extend_value_for_giv (v, bl->initial_value),
                               v->mult_val, v->add_val, v->dest_reg);
      else if (v->final_value)
        loop_insn_sink_or_swim (loop,
                                gen_load_of_final_value (v->dest_reg,

      if (loop_dump_stream)
          fprintf (loop_dump_stream, "giv at %d reduced to ",
                   INSN_UID (v->insn));
          print_simple_rtl (loop_dump_stream, v->new_reg);
          fprintf (loop_dump_stream, "\n");
loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
                         struct iv_class *bl, struct induction *v,

  benefit = v->benefit;
  PUT_MODE (test_reg, v->mode);
  add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
                               test_reg, test_reg);

  /* Reduce benefit if not replaceable, since we will insert a
     move-insn to replace the insn that calculates this giv.  Don't do
     this unless the giv is a user variable, since it will often be
     marked non-replaceable because of the duplication of the exit
     code outside the loop.  In such a case, the copies we insert are
     dead and will be deleted.  So they don't have a cost.  Similar
     situations exist.  */
  /* ??? The new final_[bg]iv_value code does a much better job of
     finding replaceable givs, and hence this code may no longer be
     necessary.  */
  if (! v->replaceable && ! bl->eliminable
      && REG_USERVAR_P (v->dest_reg))
    benefit -= copy_cost;

  /* Decrease the benefit to count the add-insns that we will insert
     to increment the reduced reg for the giv.  ??? This can
     overestimate the run-time cost of the additional insns, e.g. if
     there are multiple basic blocks that increment the biv, but only
     one of these blocks is executed during each iteration.  There is
     no good way to detect cases like this with the current structure
     of the loop optimizer.  This code is more accurate for
     determining code size than run-time benefits.  */
  benefit -= add_cost * bl->biv_count;

  /* Decide whether to strength-reduce this giv or to leave the code
     unchanged (recompute it from the biv each time it is used).  This
     decision can be made independently for each giv.  */

  /* Attempt to guess whether autoincrement will handle some of the
     new add insns; if so, increase BENEFIT (undo the subtraction of
     add_cost that was done above).  */
  if (v->giv_type == DEST_ADDR
      /* Increasing the benefit is risky, since this is only a guess.
         Avoid increasing register pressure in cases where there would
         be no other benefit from reducing this giv.  */
      && GET_CODE (v->mult_val) == CONST_INT)
      int size = GET_MODE_SIZE (GET_MODE (v->mem));

      if (HAVE_POST_INCREMENT
          && INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_PRE_INCREMENT
               && INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_POST_DECREMENT
               && -INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_PRE_DECREMENT
               && -INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
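/* A worked instance of the guess above (illustrative): an address giv
   walking an `int' array has mult_val == 4 when GET_MODE_SIZE of the
   memory reference is 4, so on a target with HAVE_POST_INCREMENT the
   add insns charged against the giv are assumed to fold into the
   addressing mode and their cost is credited back; any other stride
   leaves the benefit reduced.  */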
/* Free IV structures for LOOP.  */

loop_ivs_free (struct loop *loop)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *iv = ivs->list;

      struct iv_class *next = iv->next;
      struct induction *induction;
      struct induction *next_induction;

      for (induction = iv->biv; induction; induction = next_induction)
          next_induction = induction->next_iv;
      for (induction = iv->giv; induction; induction = next_induction)
          next_induction = induction->next_iv;
/* Perform strength reduction and induction variable elimination.

   Pseudo registers created during this function will be beyond the
   last valid index in several tables including
   REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID.  This does not cause a
   problem here, because the added registers cannot be givs outside of
   their loop, and hence will never be reconsidered.  But scan_loop
   must check regnos to make sure they are in bounds.  */
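/* As a concrete illustration (source-level, hypothetical), strength
   reduction rewrites

       for (i = 0; i < n; i++)             p = a;
         x += a[i];               ==>      for (i = 0; i < n; i++, p++)
                                             x += *p;

   so the address `a + i * 4' implicit in `a[i]' becomes a cheap
   pointer increment; if `i' then survives only in the exit test,
   induction variable elimination can rewrite that test in terms of
   `p' and delete `i' altogether.  */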
strength_reduce (struct loop *loop, int flags)
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);

  /* Temporary list pointer for traversing ivs->list.  */
  struct iv_class *bl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? Could set this to the last value of threshold in move_movables.  */
  int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
  /* Map of pseudo-register replacements.  */
  rtx *reg_map = NULL;
  int unrolled_insn_copies = 0;
  rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
  int insn_count = count_insns_in_loop (loop);

  addr_placeholder = gen_reg_rtx (Pmode);

  ivs->n_regs = max_reg_before_loop;
  ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));

  /* Find all BIVs in loop.  */
  loop_bivs_find (loop);

  /* Exit if there are no bivs.  */
      /* We can still unroll the loop anyway, but indicate that there is no
         strength reduction info available.  */
      if (flags & LOOP_UNROLL)
        unroll_loop (loop, insn_count, 0);

      loop_ivs_free (loop);

  /* Determine how BIVS are initialized by looking through the pre-header
     extended basic block.  */
  loop_bivs_init_find (loop);

  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  */
  loop_bivs_check (loop);

  /* Search the loop for general induction variables.  */
  loop_givs_find (loop);

  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number cannot be calculated.  This must
     be called after all givs have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */
  loop_iterations (loop);

#ifdef HAVE_prefetch
  if (flags & LOOP_PREFETCH)
    emit_prefetch_instructions (loop);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */
  loop_givs_check (loop);

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop, insn_count);

  /* Create reg_map to hold substitutions for replaceable giv regs.
     Some givs might have been made from biv increments, so look at
     ivs->reg_iv_type for a suitable size.  */
  reg_map_size = ivs->n_regs;
  reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));

  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = ivs->list; bl; bl = bl->next)
      struct induction *v;

      /* Test whether it will be possible to eliminate this biv
         provided all givs are reduced.  */
      bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);

      /* This will be true at the end, if all givs which depend on this
         biv have been strength reduced.
         We can't (currently) eliminate the biv unless this is so.  */
      bl->all_reduced = 1;

      /* Check each extension dependent giv in this class to see if its
         root biv is safe from wrapping in the interior mode.  */
      check_ext_dependent_givs (loop, bl);

      /* Combine all givs for this iv_class.  */
      combine_givs (regs, bl);

      for (v = bl->giv; v; v = v->next_iv)
          struct induction *tv;

          if (v->ignore || v->same)

          benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);

          /* If an insn is not to be strength reduced, then set its ignore
             flag, and clear bl->all_reduced.  */

          /* A giv that depends on a reversed biv must be reduced if it is
             used after the loop exit, otherwise, it would have the wrong
             value after the loop exit.  To make it simple, just reduce all
             such givs whether or not we know they are used after the loop
             exit.  */

          if (! flag_reduce_all_givs
              && v->lifetime * threshold * benefit < insn_count

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "giv of insn %d not worth while, %d vs %d.\n",
                         v->lifetime * threshold * benefit, insn_count);

              bl->all_reduced = 0;

              /* Check that we can increment the reduced giv without a
                 multiply insn.  If not, reject it.  */

              for (tv = bl->biv; tv; tv = tv->next_iv)
                if (tv->mult_val == const1_rtx
                    && ! product_cheap_p (tv->add_val, v->mult_val))
                    if (loop_dump_stream)
                      fprintf (loop_dump_stream,
                               "giv of insn %d: would need a multiply.\n",
                               INSN_UID (v->insn));

                    bl->all_reduced = 0;

      /* Check for givs whose first use is their definition and whose
         last use is the definition of another giv.  If so, it is likely
         dead and should not be used to derive another giv nor to
         eliminate a biv.  */
      loop_givs_dead_check (loop, bl);

      /* Reduce each giv that we decided to reduce.  */
      loop_givs_reduce (loop, bl);

      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
         as not reduced.

         For each giv register that can be reduced now: if replaceable,
         substitute reduced reg wherever the old giv occurs;
         else add new move insn "giv_reg = reduced_reg".  */
      loop_givs_rescan (loop, bl, reg_map);

      /* All the givs based on the biv bl have been reduced if they
         merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
         second giv, clear any "maybe dead" mark on that second giv.
         v->new_reg will either be or refer to the register of the giv it
         combined with.

         Doing this clearing avoids problems in biv elimination where
         a giv's new_reg is a complex value that can't be put in the
         insn but the giv combined with (with a reg as new_reg) is
         marked maybe_dead.  Since the register will be used in either
         case, we'd prefer it be used from the simpler giv.  */

      for (v = bl->giv; v; v = v->next_iv)
        if (! v->maybe_dead && v->same)
          v->same->maybe_dead = 0;

      /* Try to eliminate the biv, if it is a candidate.
         This won't work if ! bl->all_reduced,
         since the givs we planned to use might not have been reduced.

         We have to be careful that we didn't initially think we could
         eliminate this biv because of a giv that we now think may be
         dead and shouldn't be used as a biv replacement.

         Also, there is the possibility that we may have a giv that looks
         like it can be used to eliminate a biv, but the resulting insn
         isn't valid.  This can happen, for example, on the 88k, where a
         JUMP_INSN can compare a register only with zero.  Attempts to
         replace it with a compare with a constant will fail.

         Note that in cases where this call fails, we may have replaced some
         of the occurrences of the biv with a giv, but no harm was done in
         doing so in the rare cases where it can occur.  */

      if (bl->all_reduced == 1 && bl->eliminable
          && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
          /* ?? If we created a new test to bypass the loop entirely,
             or otherwise drop straight in, based on this test, then
             we might want to rewrite it also.  This way some later
             pass has more hope of removing the initialization of this
             biv entirely.  */

          /* If final_value != 0, then the biv may be used after loop end
             and we must emit an insn to set it just in case.

             Reversed bivs already have an insn after the loop setting their
             value, so we don't need another one.  We can't calculate the
             proper final value for such a biv here anyway.  */
          if (bl->final_value && ! bl->reversed)
            loop_insn_sink_or_swim (loop,
                                    gen_load_of_final_value (bl->biv->dest_reg,

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",

      /* See above note wrt final_value.  But since we couldn't eliminate
         the biv, we must set the value after the loop instead of before.  */
      else if (bl->final_value && ! bl->reversed)
        loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,

  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
        || GET_CODE (p) == CALL_INSN)
        replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
        replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);

  if (loop_info->n_iterations > 0)
      /* When we completely unroll a loop we will likely not need the
         increment of the loop BIV and we will not need the conditional
         branch at the end of the loop.  */
      unrolled_insn_copies = insn_count - 2;

      /* When we completely unroll a loop on a HAVE_cc0 machine we will not
         need the comparison before the conditional branch at the end of the
         loop.  */
        unrolled_insn_copies -= 1;

      /* We'll need one copy for each loop iteration.  */
      unrolled_insn_copies *= loop_info->n_iterations;

      /* A little slop to account for the ability to remove initialization
         code, better CSE, and other secondary benefits of completely
         unrolling some loops.  */
      unrolled_insn_copies -= 1;

      /* Clamp the value.  */
      if (unrolled_insn_copies < 0)
        unrolled_insn_copies = 0;
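/* Worked numbers for the estimate above (illustrative, non-cc0 machine):
   a 10-insn loop known to iterate 4 times gives (10 - 2) * 4 - 1 == 31
   copies, more than the original 10 insns, so it is not auto-unrolled
   below; a 4-insn loop iterating twice gives (4 - 2) * 2 - 1 == 3 and
   is.  */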
  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  Always unroll loops that would be as small or smaller
     unrolled than when rolled.  */
  if ((flags & LOOP_UNROLL)
      || ((flags & LOOP_AUTO_UNROLL)
          && loop_info->n_iterations > 0
          && unrolled_insn_copies <= insn_count))
    unroll_loop (loop, insn_count, 1);

#ifdef HAVE_doloop_end
  if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
    doloop_optimize (loop);
#endif  /* HAVE_doloop_end */

  /* In case the number of iterations is known, drop the branch prediction
     note in the branch.  Do that only in the second loop pass, as loop
     unrolling may change the number of iterations performed.  */
  if (flags & LOOP_BCT)
      unsigned HOST_WIDE_INT n
        = loop_info->n_iterations / loop_info->unroll_number;

      predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
                    REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");

  loop_ivs_free (loop);
/* Record all basic induction variables calculated in the insn.  */

check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
  struct loop_ivs *ivs = LOOP_IVS (loop);

  if (GET_CODE (p) == INSN
      && (set = single_set (p))
      && GET_CODE (SET_DEST (set)) == REG)
      dest_reg = SET_DEST (set);
      if (REGNO (dest_reg) < max_reg_before_loop
          && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
          && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
          if (basic_induction_var (loop, SET_SRC (set),
                                   GET_MODE (SET_SRC (set)),
                                   dest_reg, p, &inc_val, &mult_val,
              /* It is a possible basic induction variable.
                 Create and initialize an induction structure for it.  */
                = (struct induction *) xmalloc (sizeof (struct induction));

              record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
                          not_every_iteration, maybe_multiple);
              REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
          else if (REGNO (dest_reg) < ivs->n_regs)
            REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
/* Record all givs calculated in the insn.
   A register is a giv if: it is only set once, it is a function of a
   biv and a constant (or invariant), and it is not a biv.  */

check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
  struct loop_regs *regs = LOOP_REGS (loop);

  /* Look for a general induction variable in a register.  */
  if (GET_CODE (p) == INSN
      && (set = single_set (p))
      && GET_CODE (SET_DEST (set)) == REG
      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
      rtx last_consec_insn;

      dest_reg = SET_DEST (set);
      if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)

      if (/* SET_SRC is a giv.  */
          (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
                                  &mult_val, &ext_val, 0, &benefit, VOIDmode)
           /* Equivalent expression is a giv.  */
           || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
               && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
                                         &add_val, &mult_val, &ext_val, 0,
                                         &benefit, VOIDmode)))
          /* Don't try to handle any regs made by loop optimization.
             We have nothing on them in regno_first_uid, etc.  */
          && REGNO (dest_reg) < max_reg_before_loop
          /* Don't recognize a BASIC_INDUCT_VAR here.  */
          && dest_reg != src_reg
          /* This must be the only place where the register is set.  */
          && (regs->array[REGNO (dest_reg)].n_times_set == 1
              /* or all sets must be consecutive and make a giv.  */
              || (benefit = consec_sets_giv (loop, benefit, p,
                                             &add_val, &mult_val, &ext_val,
                                             &last_consec_insn))))
            = (struct induction *) xmalloc (sizeof (struct induction));

          /* If this is a library call, increase benefit.  */
          if (find_reg_note (p, REG_RETVAL, NULL_RTX))
            benefit += libcall_benefit (p);

          /* Skip the consecutive insns, if there are any.  */
          if (regs->array[REGNO (dest_reg)].n_times_set != 1)
            p = last_consec_insn;

          record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
                      ext_val, benefit, DEST_REG, not_every_iteration,
                      maybe_multiple, (rtx*) 0);

  /* Look for givs which are memory addresses.  */
  if (GET_CODE (p) == INSN)
    find_mem_givs (loop, PATTERN (p), p, not_every_iteration,

  /* Update the status of whether giv can derive other givs.  This can
     change when we pass a label or an insn that updates a biv.  */
  if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
      || GET_CODE (p) == CODE_LABEL)
    update_giv_derive (loop, p);
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (SMALL_REGISTER_CLASSES
          || (call_used_regs[REGNO (x)] && call_seen)))

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  MAYBE_MULTIPLE is 1 if the insn might be executed
   more than once in each loop iteration.  */

find_mem_givs (const struct loop *loop, rtx x, rtx insn,
               int not_every_iteration, int maybe_multiple)
  code = GET_CODE (x);

      /* This code used to disable creating GIVs with mult_val == 1 and
         add_val == 0.  However, this leads to lost optimizations when
         it comes time to combine a set of related DEST_ADDR GIVs, since
         this one would not be seen.  */

      if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
                                 &mult_val, &ext_val, 1, &benefit,
          /* Found one; record it.  */
            = (struct induction *) xmalloc (sizeof (struct induction));

          record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
                      add_val, ext_val, benefit, DEST_ADDR,
                      not_every_iteration, maybe_multiple, &XEXP (x, 0));

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */
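/* Examples of the two forms (illustrative): the update `i = i + 2' is
   recorded with MULT_VAL == const1_rtx and INC_VAL == 2, while a plain
   assignment `i = j' of a loop invariant J is recorded with
   MULT_VAL == const0_rtx and INC_VAL == j.  */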
record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
            rtx inc_val, rtx mult_val, rtx *location,
            int not_every_iteration, int maybe_multiple)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;

  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->ext_dependent = NULL_RTX;
  v->location = location;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
      /* Create and initialize new iv_class.  */

      bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);

      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      bl->final_value = 0;
      /* We haven't seen the initializing insn yet.  */
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->total_benefit = 0;

      /* Add this class to ivs->list.  */
      bl->next = ivs->list;

      /* Put it in the array of biv register classes.  */
      REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;

      /* Check if location is the same as a previous one.  */
      struct induction *induction;
      for (induction = bl->biv; induction; induction = induction->next_iv)
        if (location == induction->location)
            v->same = induction;

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;
  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
    loop_biv_dump (v, loop_dump_stream, 0);
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */
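/* Illustrative contrast between the two types (hypothetical register
   numbers): given a biv in reg 60, an insn such as

       (set (reg 70) (plus (mult (reg 60) (const_int 4)) (reg 65)))

   is a DEST_REG giv computed into reg 70, whereas an address such as
   `(mem (plus (reg 60) (const_int 8)))' yields a DEST_ADDR giv whose
   LOCATION is the address expression inside the MEM.  */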
record_giv (const struct loop *loop, struct induction *v, rtx insn,
            rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
            rtx ext_val, int benefit, enum g_types type,
            int not_every_iteration, int maybe_multiple, rtx *location)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);

  /* Attempt to prove constantness of the values.  Don't let simplify_rtx
     undo the MULT canonicalization that we performed earlier.  */
  temp = simplify_rtx (add_val);
      && ! (GET_CODE (add_val) == MULT
            && GET_CODE (temp) == ASHIFT))

  v->src_reg = src_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->ext_dependent = ext_val;
  v->benefit = benefit;
  v->location = location;
  v->combined_with = 0;
  v->maybe_multiple = maybe_multiple;
  v->derive_adjustment = 0;
  v->auto_inc_opt = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */

  if (type == DEST_ADDR)
    v->always_computable = 1;
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;

  if (type == DEST_ADDR)
      v->mode = GET_MODE (*location);
  else /* type == DEST_REG */
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));

      /* If the lifetime is zero, it means that this register is
         really a dead store.  So mark this as a giv that can be
         ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)

      REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
      REG_IV_INFO (ivs, REGNO (dest_reg)) = v;

  /* Add the giv to the class of givs computed from one biv.  */

  bl = REG_IV_CLASS (ivs, REGNO (src_reg));
      v->next_iv = bl->giv;

      /* Don't count DEST_ADDR.  This is supposed to count the number of
         insns that calculate givs.  */
      if (type == DEST_REG)
      bl->total_benefit += benefit;
    /* Fatal error, biv missing for this giv?  */

  if (type == DEST_ADDR)
      v->not_replaceable = 0;

      /* The giv can be replaced outright by the reduced register only if all
         of the following conditions are true:
         - the insn that sets the giv is always executed on any iteration
           on which the giv is used at all
           (there are two ways to deduce this:
            either the insn is executed on every iteration,
            or all uses follow that insn in the same basic block),
         - the giv is not used outside the loop
         - no assignments to the biv occur during the giv's lifetime.  */

      if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
          /* Previous line always fails if INSN was moved by loop opt.  */
          && REGNO_LAST_LUID (REGNO (dest_reg))
             < INSN_LUID (loop->end)
          && (! not_every_iteration
              || last_use_this_basic_block (dest_reg, insn)))
          /* Now check that there are no assignments to the biv within the
             giv's lifetime.  This requires two separate checks.  */

          /* Check each biv update, and fail if any are between the first
             and last use of the giv.

             If this loop contains an inner loop that was unrolled, then
             the insn modifying the biv may have been emitted by the loop
             unrolling code, and hence does not have a valid luid.  Just
             mark the biv as not replaceable in this case.  It is not very
             useful as a biv, because it is used in two different loops.
             It is very unlikely that we would be able to optimize the giv
             using this biv anyway.  */

          v->not_replaceable = 0;
          for (b = bl->biv; b; b = b->next_iv)
              if (INSN_UID (b->insn) >= max_uid_for_loop
                  || ((INSN_LUID (b->insn)
                       >= REGNO_FIRST_LUID (REGNO (dest_reg)))
                      && (INSN_LUID (b->insn)
                          <= REGNO_LAST_LUID (REGNO (dest_reg)))))
                  v->not_replaceable = 1;

          /* If there are any backwards branches that go from after the
             biv update to before it, then this giv is not replaceable.  */

          for (b = bl->biv; b; b = b->next_iv)
            if (back_branch_in_range_p (loop, b->insn))
                v->not_replaceable = 1;

          /* May still be replaceable, we don't have enough info here to
             decide.  */
          v->not_replaceable = 0;

  /* Record whether the add_val contains a const_int, for later use by
     combine_givs.  */
  v->no_const_addval = 1;
  if (tem == const0_rtx)
  else if (CONSTANT_P (add_val))
    v->no_const_addval = 0;
  if (GET_CODE (tem) == PLUS)
      if (GET_CODE (XEXP (tem, 0)) == PLUS)
        tem = XEXP (tem, 0);
      else if (GET_CODE (XEXP (tem, 1)) == PLUS)
        tem = XEXP (tem, 1);
      if (CONSTANT_P (XEXP (tem, 1)))
        v->no_const_addval = 0;

  if (loop_dump_stream)
    loop_giv_dump (v, loop_dump_stream, 0);
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code cannot be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that cannot be accurately calculated until after all givs
   have been identified.  */

check_final_value (const struct loop *loop, struct induction *v)
  rtx final_value = 0;

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
        either the insn is executed on every iteration,
        or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - it is not used before it is set
     - no assignments to the biv occur during the giv's lifetime.  */

  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */

  if ((final_value = final_giv_value (loop, v))
      && (v->always_executed
          || last_use_this_basic_block (v->dest_reg, v->insn)))
      int biv_increment_seen = 0, before_giv_insn = 0;

      v->not_replaceable = 0;

      /* When trying to determine whether or not a biv increment occurs
         during the lifetime of the giv, we can ignore uses of the variable
         outside the loop because final_value is true.  Hence we cannot
         use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
         biv occur during the giv's lifetime.  Start with the insn
         that sets the giv, and search around the loop until we come
         back to that insn again.

         Also fail if there is a jump within the giv's lifetime that jumps
         to somewhere outside the lifetime but still within the loop.  This
         catches spaghetti code where the execution order is not linear, and
         hence the above test fails.  Here we assume that the giv lifetime
         does not extend from one iteration of the loop to the next, so as
         to make the test easier.  Since the lifetime isn't known yet,
         this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

          before_giv_insn = 1;
          p = NEXT_INSN (loop->start);

          if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
              || GET_CODE (p) == CALL_INSN)
              /* It is possible for the BIV increment to use the GIV if we
                 have a cycle.  Thus we must be sure to check each insn for
                 both BIV and GIV uses, and we must check for BIV uses
                 first.  */

              if (! biv_increment_seen
                  && reg_set_p (v->src_reg, PATTERN (p)))
                biv_increment_seen = 1;

              if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
                  if (biv_increment_seen || before_giv_insn)
                      v->not_replaceable = 1;

      /* Now that the lifetime of the giv is known, check for branches
         from within the lifetime to outside the lifetime if it is still
         replaceable.  */

          p = NEXT_INSN (loop->start);
          if (p == last_giv_use)

          if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
              && LABEL_NAME (JUMP_LABEL (p))
              && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
                   && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
                  || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
                      && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
              v->not_replaceable = 1;

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Found branch outside giv lifetime.\n");

  /* If it is replaceable, then save the final value.  */
    v->final_value = final_value;

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
             INSN_UID (v->insn), REGNO (v->dest_reg));
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

update_giv_derive (const struct loop *loop, rtx p)
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  struct induction *biv, *giv;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */
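/* A worked example of the compensation (illustrative): if a giv
   G == 3 * B is computed before a later update `B = B + 2' in the same
   iteration, a giv derived from G afterwards must be adjusted by
   3 * 2 == 6; that product of giv->mult_val and biv->add_val is what
   gets accumulated in giv->derive_adjustment below.  */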
6098 for (bl = ivs->list; bl; bl = bl->next)
6099 for (biv = bl->biv; biv; biv = biv->next_iv)
6100 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6103 for (giv = bl->giv; giv; giv = giv->next_iv)
6105 /* If cant_derive is already true, there is no point in
6106 checking all of these conditions again. */
6107 if (giv->cant_derive)
6110 /* If this giv is conditionally set and we have passed a label,
6111 it cannot derive anything. */
6112 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6113 giv->cant_derive = 1;
6115 /* Skip givs that have mult_val == 0, since
6116 they are really invariants. Also skip those that are
6117 replaceable, since we know their lifetime doesn't contain a biv update. */
6119 else if (giv->mult_val == const0_rtx || giv->replaceable)
6122 /* The only way we can allow this giv to derive another
6123 is if this is a biv increment and we can form the product
6124 of biv->add_val and giv->mult_val. In this case, we will
6125 be able to compute a compensation. */
6126 else if (biv->insn == p)
6131 if (biv->mult_val == const1_rtx)
6132 tem = simplify_giv_expr (loop,
6133 gen_rtx_MULT (giv->mode,
6136 &ext_val_dummy, &dummy);
6138 if (tem && giv->derive_adjustment)
6139 tem = simplify_giv_expr
6141 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6142 &ext_val_dummy, &dummy);
6145 giv->derive_adjustment = tem;
6147 giv->cant_derive = 1;
6149 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6150 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6151 giv->cant_derive = 1;
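/* Illustrative sketch (not part of the pass; a hypothetical source-level
   view): the increment shapes that basic_induction_var below accepts.
   Each assignment to `i' adds a loop-invariant quantity, so `i' would
   qualify as a biv with *mult_val == const1_rtx and *inc_val set to the
   invariant term.  */
static int
biv_increment_shapes_example (int i, int step)
{
  i = i + 4;      /* REG = REG + INVARIANT (constant case) */
  i = step + i;   /* REG = INVARIANT + REG (step assumed loop invariant) */
  i = i - 1;      /* REG = REG - CONSTANT, i.e. addition of a negative */
  return i;
}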
6156 /* Check whether an insn is an increment legitimate for a basic induction var.
6157 X is the source of insn P, or a part of it.
6158 MODE is the mode in which X should be interpreted.
6160 DEST_REG is the putative biv, also the destination of the insn.
6161 We accept patterns of these forms:
6162 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6163 REG = INVARIANT + REG
6165 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6166 store the additive term into *INC_VAL, and store the place where
6167 we found the additive term into *LOCATION.
6169 If X is an assignment of an invariant into DEST_REG, we set
6170 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6172 We also want to detect a BIV when it corresponds to a variable
6173 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6174 of the variable may be a PLUS that adds a SUBREG of that variable to
6175 an invariant and then sign- or zero-extends the result of the PLUS into the variable.
6178 Most GIVs in such cases will be in the promoted mode, since that is
6179 probably the natural computation mode (and almost certainly the mode
6180 used for addresses) on the machine. So we view the pseudo-reg containing
6181 the variable as the BIV, as if it were simply incremented.
6183 Note that treating the entire pseudo as a BIV will result in making
6184 simple increments to any GIVs based on it. However, if the variable
6185 overflows in its declared mode but not its promoted mode, the result will
6186 be incorrect. This is acceptable if the variable is signed, since
6187 overflows in such cases are undefined, but not if it is unsigned, since
6188 those overflows are defined. So we only check for SIGN_EXTEND and not ZERO_EXTEND.
6191 If we cannot find a biv, we return 0. */
6194 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6195 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6200 rtx insn, set = 0, last, inc;
6202 code = GET_CODE (x);
6207 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6208 || (GET_CODE (XEXP (x, 0)) == SUBREG
6209 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6210 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6212 argp = &XEXP (x, 1);
6214 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6215 || (GET_CODE (XEXP (x, 1)) == SUBREG
6216 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6217 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6219 argp = &XEXP (x, 0);
6225 if (loop_invariant_p (loop, arg) != 1)
6228 /* convert_modes can emit new instructions, e.g. when arg is a loop
6229 invariant MEM and dest_reg has a different mode.
6230 These instructions would be emitted after the end of the function
6231 and then *inc_val would be an uninitialized pseudo.
6232 Detect this and bail in this case.
6233 Other ways to solve this would be to introduce a convert_modes
6234 variant that is allowed to fail but not to emit new instructions,
6235 to emit these instructions before loop start and let them be
6236 garbage collected if *inc_val is never used, or to save the
6237 *inc_val initialization sequence generated here and, when *inc_val
6238 is actually going to be used, emit it at some suitable place. */
6239 last = get_last_insn ();
6240 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6241 if (get_last_insn () != last)
6243 delete_insns_since (last);
6248 *mult_val = const1_rtx;
6253 /* If what's inside the SUBREG is a BIV, then the SUBREG is one too. This will
6254 handle addition of promoted variables.
6255 ??? The comment at the start of this function is wrong: promoted
6256 variable increments don't look like it says they do. */
6257 return basic_induction_var (loop, SUBREG_REG (x),
6258 GET_MODE (SUBREG_REG (x)),
6259 dest_reg, p, inc_val, mult_val, location);
6262 /* If this register is assigned in a previous insn, look at its
6263 source, but don't go outside the loop or past a label. */
6265 /* If this sets a register to itself, we would repeat any previous
6266 biv increment if we applied this strategy blindly. */
6267 if (rtx_equal_p (dest_reg, x))
6276 insn = PREV_INSN (insn);
6278 while (insn && GET_CODE (insn) == NOTE
6279 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6283 set = single_set (insn);
6286 dest = SET_DEST (set);
6288 || (GET_CODE (dest) == SUBREG
6289 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6290 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6291 && SUBREG_REG (dest) == x))
6292 return basic_induction_var (loop, SET_SRC (set),
6293 (GET_MODE (SET_SRC (set)) == VOIDmode
6295 : GET_MODE (SET_SRC (set))),
6297 inc_val, mult_val, location);
6299 while (GET_CODE (dest) == SIGN_EXTRACT
6300 || GET_CODE (dest) == ZERO_EXTRACT
6301 || GET_CODE (dest) == SUBREG
6302 || GET_CODE (dest) == STRICT_LOW_PART)
6303 dest = XEXP (dest, 0);
6309 /* Can accept constant setting of biv only when inside the innermost loop.
6310 Otherwise, a biv of an inner loop may be incorrectly recognized
6311 as a biv of the outer loop,
6312 causing code to be moved INTO the inner loop. */
6314 if (loop_invariant_p (loop, x) != 1)
6319 /* convert_modes aborts if we try to convert to or from CCmode, so just
6320 exclude that case. It is very unlikely that a condition code value
6321 would be a useful iterator anyway. convert_modes aborts if we try to
6322 convert a float mode to non-float or vice versa too. */
6323 if (loop->level == 1
6324 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6325 && GET_MODE_CLASS (mode) != MODE_CC)
6327 /* Possible bug here? Perhaps we don't know the mode of X. */
6328 last = get_last_insn ();
6329 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6330 if (get_last_insn () != last)
6332 delete_insns_since (last);
6337 *mult_val = const0_rtx;
6344 /* Ignore this BIV if signed arithmetic overflow is defined. */
6347 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6348 dest_reg, p, inc_val, mult_val, location);
6351 /* Similar, since this can be a sign extension. */
6352 for (insn = PREV_INSN (p);
6353 (insn && GET_CODE (insn) == NOTE
6354 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6355 insn = PREV_INSN (insn))
6359 set = single_set (insn);
6361 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6362 && set && SET_DEST (set) == XEXP (x, 0)
6363 && GET_CODE (XEXP (x, 1)) == CONST_INT
6364 && INTVAL (XEXP (x, 1)) >= 0
6365 && GET_CODE (SET_SRC (set)) == ASHIFT
6366 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6367 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6368 GET_MODE (XEXP (x, 0)),
6369 dest_reg, insn, inc_val, mult_val,
6378 /* A general induction variable (giv) is any quantity that is a linear
6379 function of a basic induction variable,
6380 i.e. giv = biv * mult_val + add_val.
6381 The coefficients can be any loop invariant quantity.
6382 A giv need not be computed directly from the biv;
6383 it can be computed by way of other givs. */
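/* Illustrative sketch (not part of the pass): in the hypothetical loop
   below, `i' is a basic induction variable, and the address expression
   `&a[i]' -- a + 4*i on a target with 4-byte ints -- is a general
   induction variable of `i' with mult_val == 4 and add_val == `a'.
   Strength reduction would replace the implied multiply by a pointer
   that is simply bumped by 4 on every iteration.  */
static void
giv_example (int *a, int n)
{
  int i;
  for (i = 0; i < n; i++)  /* i: the biv */
    a[i] = 0;              /* a + 4*i: a giv derived from i */
}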
6385 /* Determine whether X computes a giv.
6386 If it does, return a nonzero value
6387 which is the benefit from eliminating the computation of X;
6388 set *SRC_REG to the register of the biv that it is computed from;
6389 set *ADD_VAL and *MULT_VAL to the coefficients,
6390 such that the value of X is biv * mult + add; */
6393 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6394 rtx *add_val, rtx *mult_val, rtx *ext_val,
6395 int is_addr, int *pbenefit,
6396 enum machine_mode addr_mode)
6398 struct loop_ivs *ivs = LOOP_IVS (loop);
6401 /* If this is an invariant, forget it, it isn't a giv. */
6402 if (loop_invariant_p (loop, x) == 1)
6406 *ext_val = NULL_RTX;
6407 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6411 switch (GET_CODE (x))
6415 /* Since this is now an invariant and wasn't before, it must be a giv
6416 with MULT_VAL == 0. It doesn't matter which BIV we associate this with. */
6418 *src_reg = ivs->list->biv->dest_reg;
6419 *mult_val = const0_rtx;
6424 /* This is equivalent to a BIV. */
6426 *mult_val = const1_rtx;
6427 *add_val = const0_rtx;
6431 /* Either (plus (biv) (invar)) or
6432 (plus (mult (biv) (invar_1)) (invar_2)). */
6433 if (GET_CODE (XEXP (x, 0)) == MULT)
6435 *src_reg = XEXP (XEXP (x, 0), 0);
6436 *mult_val = XEXP (XEXP (x, 0), 1);
6440 *src_reg = XEXP (x, 0);
6441 *mult_val = const1_rtx;
6443 *add_val = XEXP (x, 1);
6447 /* ADD_VAL is zero. */
6448 *src_reg = XEXP (x, 0);
6449 *mult_val = XEXP (x, 1);
6450 *add_val = const0_rtx;
6457 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6458 one unless they are CONST_INT). */
6459 if (GET_CODE (*add_val) == USE)
6460 *add_val = XEXP (*add_val, 0);
6461 if (GET_CODE (*mult_val) == USE)
6462 *mult_val = XEXP (*mult_val, 0);
6465 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6467 *pbenefit += rtx_cost (orig_x, SET);
6469 /* Always return true if this is a giv so it will be detected as such,
6470 even if the benefit is zero or negative. This allows elimination
6471 of bivs that might otherwise not be eliminated. */
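/* Worked sketch of the canonical form computed by simplify_giv_expr
   (made-up coefficients): with biv `i', the source expression
       (5*i + 3) + 2*i
   simplifies to 7*i + 3, i.e. (plus (mult i 7) 3), so the caller sees
   *mult_val == 7 and *add_val == 3 once any USE wrappers are stripped.  */
static int
canonical_giv_example (int i)
{
  return (5 * i + 3) + 2 * i;  /* equivalent to 7*i + 3 */
}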
6475 /* Given an expression, X, try to form it as a linear function of a biv.
6476 We will canonicalize it to be of the form
6477 (plus (mult (BIV) (invar_1))
6479 with possible degeneracies.
6481 The invariant expressions must each be of a form that can be used as a
6482 machine operand. We surround them with a USE rtx (a hack, but localized
6483 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6484 routine; it is the caller's responsibility to strip them.
6486 If no such canonicalization is possible (i.e., two biv's are used or an
6487 expression that is neither invariant nor a biv or giv), this routine
6490 For a nonzero return, the result will have a code of CONST_INT, USE,
6491 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6493 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
6495 static rtx sge_plus (enum machine_mode, rtx, rtx);
6496 static rtx sge_plus_constant (rtx, rtx);
6499 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6501 struct loop_ivs *ivs = LOOP_IVS (loop);
6502 struct loop_regs *regs = LOOP_REGS (loop);
6503 enum machine_mode mode = GET_MODE (x);
6507 /* If this is not an integer mode, or if we cannot do arithmetic in this
6508 mode, this can't be a giv. */
6509 if (mode != VOIDmode
6510 && (GET_MODE_CLASS (mode) != MODE_INT
6511 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6514 switch (GET_CODE (x))
6517 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6518 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6519 if (arg0 == 0 || arg1 == 0)
6522 /* Put constant last, CONST_INT last if both constant. */
6523 if ((GET_CODE (arg0) == USE
6524 || GET_CODE (arg0) == CONST_INT)
6525 && ! ((GET_CODE (arg0) == USE
6526 && GET_CODE (arg1) == USE)
6527 || GET_CODE (arg1) == CONST_INT))
6528 tem = arg0, arg0 = arg1, arg1 = tem;
6530 /* Handle addition of zero, then addition of an invariant. */
6531 if (arg1 == const0_rtx)
6533 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6534 switch (GET_CODE (arg0))
6538 /* Adding two invariants must result in an invariant, so enclose
6539 addition operation inside a USE and return it. */
6540 if (GET_CODE (arg0) == USE)
6541 arg0 = XEXP (arg0, 0);
6542 if (GET_CODE (arg1) == USE)
6543 arg1 = XEXP (arg1, 0);
6545 if (GET_CODE (arg0) == CONST_INT)
6546 tem = arg0, arg0 = arg1, arg1 = tem;
6547 if (GET_CODE (arg1) == CONST_INT)
6548 tem = sge_plus_constant (arg0, arg1);
6550 tem = sge_plus (mode, arg0, arg1);
6552 if (GET_CODE (tem) != CONST_INT)
6553 tem = gen_rtx_USE (mode, tem);
6558 /* biv + invar or mult + invar. Return sum. */
6559 return gen_rtx_PLUS (mode, arg0, arg1);
6562 /* (a + invar_1) + invar_2. Associate. */
6564 simplify_giv_expr (loop,
6576 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6577 MULT to reduce cases. */
6578 if (GET_CODE (arg0) == REG)
6579 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6580 if (GET_CODE (arg1) == REG)
6581 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6583 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6584 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6585 Recurse to associate the second PLUS. */
6586 if (GET_CODE (arg1) == MULT)
6587 tem = arg0, arg0 = arg1, arg1 = tem;
6589 if (GET_CODE (arg1) == PLUS)
6591 simplify_giv_expr (loop,
6593 gen_rtx_PLUS (mode, arg0,
6598 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6599 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6602 if (!rtx_equal_p (arg0, arg1))
6605 return simplify_giv_expr (loop,
6614 /* Handle "a - b" as "a + b * (-1)". */
6615 return simplify_giv_expr (loop,
6624 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6625 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6626 if (arg0 == 0 || arg1 == 0)
6629 /* Put constant last, CONST_INT last if both constant. */
6630 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6631 && GET_CODE (arg1) != CONST_INT)
6632 tem = arg0, arg0 = arg1, arg1 = tem;
6634 /* If second argument is not now constant, not giv. */
6635 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6638 /* Handle multiply by 0 or 1. */
6639 if (arg1 == const0_rtx)
6642 else if (arg1 == const1_rtx)
6645 switch (GET_CODE (arg0))
6648 /* biv * invar. Done. */
6649 return gen_rtx_MULT (mode, arg0, arg1);
6652 /* Product of two constants. */
6653 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6656 /* invar * invar is a giv, but attempt to simplify it somehow. */
6657 if (GET_CODE (arg1) != CONST_INT)
6660 arg0 = XEXP (arg0, 0);
6661 if (GET_CODE (arg0) == MULT)
6663 /* (invar_0 * invar_1) * invar_2. Associate. */
6664 return simplify_giv_expr (loop,
6673 /* Propagate the MULT expressions to the innermost nodes. */
6674 else if (GET_CODE (arg0) == PLUS)
6676 /* (invar_0 + invar_1) * invar_2. Distribute. */
6677 return simplify_giv_expr (loop,
6689 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6692 /* (a * invar_1) * invar_2. Associate. */
6693 return simplify_giv_expr (loop,
6702 /* (a + invar_1) * invar_2. Distribute. */
6703 return simplify_giv_expr (loop,
6718 /* Shift by constant is multiply by power of two. */
6719 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6723 simplify_giv_expr (loop,
6726 GEN_INT ((HOST_WIDE_INT) 1
6727 << INTVAL (XEXP (x, 1)))),
6731 /* "-a" is "a * (-1)" */
6732 return simplify_giv_expr (loop,
6733 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6737 /* "~a" is "-a - 1". Silly, but easy. */
6738 return simplify_giv_expr (loop,
6739 gen_rtx_MINUS (mode,
6740 gen_rtx_NEG (mode, XEXP (x, 0)),
6745 /* Already in proper form for invariant. */
6751 /* Conditionally recognize extensions of simple IVs. After we've
6752 computed loop traversal counts and verified the range of the
6753 source IV, we'll reevaluate this as a GIV. */
6754 if (*ext_val == NULL_RTX)
6756 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6757 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6759 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6766 /* If this is a new register, we can't deal with it. */
6767 if (REGNO (x) >= max_reg_before_loop)
6770 /* Check for biv or giv. */
6771 switch (REG_IV_TYPE (ivs, REGNO (x)))
6775 case GENERAL_INDUCT:
6777 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6779 /* Form expression from giv and add benefit. Ensure this giv
6780 can derive another and subtract any needed adjustment if so. */
6782 /* Increasing the benefit here is risky. The only case in which it
6783 is arguably correct is if this is the only use of V. In other
6784 cases, this will artificially inflate the benefit of the current
6785 giv, and lead to suboptimal code. Thus, it is disabled, since
6786 potentially not reducing an only marginally beneficial giv is
6787 less harmful than reducing many givs that are not really beneficial. */
6790 rtx single_use = regs->array[REGNO (x)].single_usage;
6791 if (single_use && single_use != const0_rtx)
6792 *benefit += v->benefit;
6798 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6799 v->src_reg, v->mult_val),
6802 if (v->derive_adjustment)
6803 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6804 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6807 if (!v->ext_dependent)
6812 *ext_val = v->ext_dependent;
6820 /* If it isn't an induction variable, and it is invariant, we
6821 may be able to simplify things further by looking through
6822 the bits we just moved outside the loop. */
6823 if (loop_invariant_p (loop, x) == 1)
6826 struct loop_movables *movables = LOOP_MOVABLES (loop);
6828 for (m = movables->head; m; m = m->next)
6829 if (rtx_equal_p (x, m->set_dest))
6831 /* Ok, we found a match. Substitute and simplify. */
6833 /* If we match another movable, we must use that, as
6834 this one is going away. */
6836 return simplify_giv_expr (loop, m->match->set_dest,
6839 /* If consec is nonzero, this is a member of a group of
6840 instructions that were moved together. We handle this
6841 case only to the point of seeking to the last insn and
6842 looking for a REG_EQUAL. Fail if we don't find one. */
6849 tem = NEXT_INSN (tem);
6853 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6855 tem = XEXP (tem, 0);
6859 tem = single_set (m->insn);
6861 tem = SET_SRC (tem);
6866 /* What we are most interested in is pointer
6867 arithmetic on invariants -- only take
6868 patterns we may be able to do something with. */
6869 if (GET_CODE (tem) == PLUS
6870 || GET_CODE (tem) == MULT
6871 || GET_CODE (tem) == ASHIFT
6872 || GET_CODE (tem) == CONST_INT
6873 || GET_CODE (tem) == SYMBOL_REF)
6875 tem = simplify_giv_expr (loop, tem, ext_val,
6880 else if (GET_CODE (tem) == CONST
6881 && GET_CODE (XEXP (tem, 0)) == PLUS
6882 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6883 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6885 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6897 /* Fall through to general case. */
6899 /* If invariant, return as USE (unless CONST_INT).
6900 Otherwise, not giv. */
6901 if (GET_CODE (x) == USE)
6904 if (loop_invariant_p (loop, x) == 1)
6906 if (GET_CODE (x) == CONST_INT)
6908 if (GET_CODE (x) == CONST
6909 && GET_CODE (XEXP (x, 0)) == PLUS
6910 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6911 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6913 return gen_rtx_USE (mode, x);
6920 /* This routine folds invariants such that there is only ever one
6921 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6924 sge_plus_constant (rtx x, rtx c)
6926 if (GET_CODE (x) == CONST_INT)
6927 return GEN_INT (INTVAL (x) + INTVAL (c));
6928 else if (GET_CODE (x) != PLUS)
6929 return gen_rtx_PLUS (GET_MODE (x), x, c);
6930 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6932 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6933 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6935 else if (GET_CODE (XEXP (x, 0)) == PLUS
6936 || GET_CODE (XEXP (x, 1)) != PLUS)
6938 return gen_rtx_PLUS (GET_MODE (x),
6939 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6943 return gen_rtx_PLUS (GET_MODE (x),
6944 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6949 sge_plus (enum machine_mode mode, rtx x, rtx y)
6951 while (GET_CODE (y) == PLUS)
6953 rtx a = XEXP (y, 0);
6954 if (GET_CODE (a) == CONST_INT)
6955 x = sge_plus_constant (x, a);
6957 x = gen_rtx_PLUS (mode, x, a);
6960 if (GET_CODE (y) == CONST_INT)
6961 x = sge_plus_constant (x, y);
6963 x = gen_rtx_PLUS (mode, x, y);
6967 /* Help detect a giv that is calculated by several consecutive insns; for example, giv = biv * M followed by giv = giv + A.
6971 The caller has already identified the first insn P as having a giv as dest;
6972 we check that all other insns that set the same register follow
6973 immediately after P, that they alter nothing else,
6974 and that the result of the last is still a giv.
6976 The value is 0 if the reg set in P is not really a giv.
6977 Otherwise, the value is the amount gained by eliminating
6978 all the consecutive insns that compute the value.
6980 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6981 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6983 The coefficients of the ultimate giv value are stored in
6984 *MULT_VAL and *ADD_VAL. */
6987 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
6988 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
6989 rtx *ext_val, rtx *last_consec_insn)
6991 struct loop_ivs *ivs = LOOP_IVS (loop);
6992 struct loop_regs *regs = LOOP_REGS (loop);
6999 /* Indicate that this is a giv so that we can update the value produced in
7000 each insn of the multi-insn sequence.
7002 This induction structure will be used only by the call to
7003 general_induction_var below, so we can allocate it on our stack.
7004 If this is a giv, our caller will replace the induct var entry with
7005 a new induction structure. */
7006 struct induction *v;
7008 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7011 v = (struct induction *) alloca (sizeof (struct induction));
7012 v->src_reg = src_reg;
7013 v->mult_val = *mult_val;
7014 v->add_val = *add_val;
7015 v->benefit = first_benefit;
7017 v->derive_adjustment = 0;
7018 v->ext_dependent = NULL_RTX;
7020 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7021 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7023 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7028 code = GET_CODE (p);
7030 /* If libcall, skip to end of call sequence. */
7031 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7035 && (set = single_set (p))
7036 && GET_CODE (SET_DEST (set)) == REG
7037 && SET_DEST (set) == dest_reg
7038 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7039 add_val, mult_val, ext_val, 0,
7041 /* Giv created by equivalent expression. */
7042 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7043 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7044 add_val, mult_val, ext_val, 0,
7045 &benefit, VOIDmode)))
7046 && src_reg == v->src_reg)
7048 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7049 benefit += libcall_benefit (p);
7052 v->mult_val = *mult_val;
7053 v->add_val = *add_val;
7054 v->benefit += benefit;
7056 else if (code != NOTE)
7058 /* Allow insns that set something other than this giv to a
7059 constant. Such insns are needed on machines which cannot
7060 include long constants and should not disqualify a giv. */
7062 && (set = single_set (p))
7063 && SET_DEST (set) != dest_reg
7064 && CONSTANT_P (SET_SRC (set)))
7067 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7072 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7073 *last_consec_insn = p;
7077 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7078 represented by G1. If no such expression can be found, or it is clear that
7079 it cannot possibly be a valid address, 0 is returned.
7081 To perform the computation, we note that
7082 G1 = x * v + a and
7083 G2 = y * v + b
7084 where `v' is the biv.
7086 So G2 = (y/x) * G1 + (b - a*y/x).
7088 Note that MULT = y/x.
7090 Update: A and B are now allowed to be additive expressions such that
7091 B contains all variables in A. That is, computing B-A will not require
7092 subtracting variables. */
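/* Worked sketch of the algebra above with made-up coefficients: if
   G1 = 4*v + 0 and G2 = 8*v + 12, then MULT = y/x = 2 and
   G2 = 2*G1 + 12, so G2 can be recomputed from the register holding G1
   without referring to the biv `v' at all.  */
static long
express_from_example (long g1)
{
  return 2 * g1 + 12;  /* G2 in terms of G1 */
}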
7095 express_from_1 (rtx a, rtx b, rtx mult)
7097 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7099 if (mult == const0_rtx)
7102 /* If MULT is not 1, we cannot handle A with non-constants, since we
7103 would then be required to subtract multiples of the registers in A.
7104 This is theoretically possible, and may even apply to some Fortran
7105 constructs, but it is a lot of work and we do not attempt it here. */
7107 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7110 /* In general these structures are sorted top to bottom (down the PLUS
7111 chain), but not left to right across the PLUS. If B is a higher
7112 order giv than A, we can strip one level and recurse. If A is higher
7113 order, we'll eventually bail out, but won't know that until the end.
7114 If they are the same, we'll strip one level around this loop. */
7116 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7118 rtx ra, rb, oa, ob, tmp;
7120 ra = XEXP (a, 0), oa = XEXP (a, 1);
7121 if (GET_CODE (ra) == PLUS)
7122 tmp = ra, ra = oa, oa = tmp;
7124 rb = XEXP (b, 0), ob = XEXP (b, 1);
7125 if (GET_CODE (rb) == PLUS)
7126 tmp = rb, rb = ob, ob = tmp;
7128 if (rtx_equal_p (ra, rb))
7129 /* We matched: remove one reg completely. */
7131 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7132 /* An alternate match. */
7134 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7135 /* An alternate match. */
7139 /* Indicates an extra register in B. Strip one level from B and
7140 recurse, hoping B was the higher order expression. */
7141 ob = express_from_1 (a, ob, mult);
7144 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7148 /* Here we are at the last level of A; go through the cases hoping to
7149 get rid of everything but a constant. */
7151 if (GET_CODE (a) == PLUS)
7155 ra = XEXP (a, 0), oa = XEXP (a, 1);
7156 if (rtx_equal_p (oa, b))
7158 else if (!rtx_equal_p (ra, b))
7161 if (GET_CODE (oa) != CONST_INT)
7164 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7166 else if (GET_CODE (a) == CONST_INT)
7168 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7170 else if (CONSTANT_P (a))
7172 enum machine_mode mode_a = GET_MODE (a);
7173 enum machine_mode mode_b = GET_MODE (b);
7174 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7175 return simplify_gen_binary (MINUS, mode, b, a);
7177 else if (GET_CODE (b) == PLUS)
7179 if (rtx_equal_p (a, XEXP (b, 0)))
7181 else if (rtx_equal_p (a, XEXP (b, 1)))
7186 else if (rtx_equal_p (a, b))
7193 express_from (struct induction *g1, struct induction *g2)
7197 /* The value that G1 will be multiplied by must be a constant integer. Also,
7198 the only chance we have of getting a valid address is if b*c/a (see above
7199 for notation) is also an integer. */
7200 if (GET_CODE (g1->mult_val) == CONST_INT
7201 && GET_CODE (g2->mult_val) == CONST_INT)
7203 if (g1->mult_val == const0_rtx
7204 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7206 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7208 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7212 /* ??? Find out if the one is a multiple of the other? */
7216 add = express_from_1 (g1->add_val, g2->add_val, mult);
7217 if (add == NULL_RTX)
7219 /* Failed. If we've got a multiplication factor between G1 and G2,
7220 scale G1's addend and try again. */
7221 if (INTVAL (mult) > 1)
7223 rtx g1_add_val = g1->add_val;
7224 if (GET_CODE (g1_add_val) == MULT
7225 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7228 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7229 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7230 XEXP (g1_add_val, 0), GEN_INT (m));
7234 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7238 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7241 if (add == NULL_RTX)
7244 /* Form simplified final result. */
7245 if (mult == const0_rtx)
7247 else if (mult == const1_rtx)
7248 mult = g1->dest_reg;
7250 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7252 if (add == const0_rtx)
7256 if (GET_CODE (add) == PLUS
7257 && CONSTANT_P (XEXP (add, 1)))
7259 rtx tem = XEXP (add, 1);
7260 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7264 return gen_rtx_PLUS (g2->mode, mult, add);
7268 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7269 represented by G1. This indicates that G2 should be combined with G1 and
7270 that G2 can use (either directly or via an address expression) a register
7271 used to represent G1. */
7274 combine_givs_p (struct induction *g1, struct induction *g2)
7278 /* With the introduction of ext dependent givs, we must be careful about modes.
7279 G2 must not use a wider mode than G1. */
7280 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7283 ret = comb = express_from (g1, g2);
7284 if (comb == NULL_RTX)
7286 if (g1->mode != g2->mode)
7287 ret = gen_lowpart (g2->mode, comb);
7289 /* If these givs are identical, they can be combined. We use the results
7290 of express_from because the addends are not in a canonical form, so
7291 rtx_equal_p is a weaker test. */
7292 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7293 combination to be the other way round. */
7294 if (comb == g1->dest_reg
7295 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7300 /* If G2 can be expressed as a function of G1 and that function is valid
7301 as an address and no more expensive than using a register for G2,
7302 the expression of G2 in terms of G1 can be used. */
7304 && g2->giv_type == DEST_ADDR
7305 && memory_address_p (GET_MODE (g2->mem), ret))
7311 /* Check each extension dependent giv in this class to see if its
7312 root biv is safe from wrapping in the interior mode, which would
7313 make the giv illegal. */
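/* Worked sketch of the range test below (assumed 8-bit biv, made-up
   iteration count): starting at 0 with increment +1 for 200 iterations
   gives an end value of 200.  Zero extension is then safe, since
   200 <= 255 (the 8-bit mode mask), but sign extension is not, since
   200 > 127, so only ZERO_EXTEND givs of this biv would survive.  */
static void
ext_range_check_example (void)
{
  unsigned long start_val = 0, abs_incr = 1, n_iterations = 200;
  unsigned long u_end_val = start_val + abs_incr * n_iterations;
  unsigned long u_max = 0xff;            /* mode mask for 8 bits */
  long s_max = 0x7f;
  int ze_ok = u_end_val <= u_max;        /* 1: zero extension is safe */
  int se_ok = (long) u_end_val <= s_max; /* 0: sign extension is not */
  (void) ze_ok;
  (void) se_ok;
}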
7316 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7318 struct loop_info *loop_info = LOOP_INFO (loop);
7319 int ze_ok = 0, se_ok = 0, info_ok = 0;
7320 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7321 HOST_WIDE_INT start_val;
7322 unsigned HOST_WIDE_INT u_end_val = 0;
7323 unsigned HOST_WIDE_INT u_start_val = 0;
7325 struct induction *v;
7327 /* Make sure the iteration data is available. We must have
7328 constants in order to be certain of no overflow. */
7329 if (loop_info->n_iterations > 0
7330 && bl->initial_value
7331 && GET_CODE (bl->initial_value) == CONST_INT
7332 && (incr = biv_total_increment (bl))
7333 && GET_CODE (incr) == CONST_INT
7334 /* Make sure the host can represent the arithmetic. */
7335 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7337 unsigned HOST_WIDE_INT abs_incr, total_incr;
7338 HOST_WIDE_INT s_end_val;
7342 start_val = INTVAL (bl->initial_value);
7343 u_start_val = start_val;
7345 neg_incr = 0, abs_incr = INTVAL (incr);
7346 if (INTVAL (incr) < 0)
7347 neg_incr = 1, abs_incr = -abs_incr;
7348 total_incr = abs_incr * loop_info->n_iterations;
7350 /* Check for host arithmetic overflow. */
7351 if (total_incr / loop_info->n_iterations == abs_incr)
7353 unsigned HOST_WIDE_INT u_max;
7354 HOST_WIDE_INT s_max;
7356 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7357 s_end_val = u_end_val;
7358 u_max = GET_MODE_MASK (biv_mode);
7361 /* Check zero extension of biv ok. */
7363 /* Check for host arithmetic overflow. */
7365 ? u_end_val < u_start_val
7366 : u_end_val > u_start_val)
7367 /* Check for target arithmetic overflow. */
7369 ? 1 /* taken care of with host overflow */
7370 : u_end_val <= u_max))
7375 /* Check sign extension of biv ok. */
7376 /* ??? While it is true that overflow with signed and pointer
7377 arithmetic is undefined, I fear too many programmers don't
7378 keep this fact in mind -- myself included on occasion.
7379 So be conservative with the signed overflow optimizations. */
7380 if (start_val >= -s_max - 1
7381 /* Check for host arithmetic overflow. */
7383 ? s_end_val < start_val
7384 : s_end_val > start_val)
7385 /* Check for target arithmetic overflow. */
7387 ? s_end_val >= -s_max - 1
7388 : s_end_val <= s_max))
7395 /* If we know the BIV is compared at run-time against an
7396 invariant value, and the increment is +/- 1, we may also
7397 be able to prove that the BIV cannot overflow. */
7398 else if (bl->biv->src_reg == loop_info->iteration_var
7399 && loop_info->comparison_value
7400 && loop_invariant_p (loop, loop_info->comparison_value)
7401 && (incr = biv_total_increment (bl))
7402 && GET_CODE (incr) == CONST_INT)
7404 /* If the increment is +1, and the exit test is a <,
7405 the BIV cannot overflow. (For <=, we have the
7406 problematic case that the comparison value might
7407 be the maximum value of the range.) */
7408 if (INTVAL (incr) == 1)
7410 if (loop_info->comparison_code == LT)
7412 else if (loop_info->comparison_code == LTU)
7416 /* Likewise for increment -1 and exit test >. */
7417 if (INTVAL (incr) == -1)
7419 if (loop_info->comparison_code == GT)
7421 else if (loop_info->comparison_code == GTU)
7426 /* Invalidate givs that fail the tests. */
7427 for (v = bl->giv; v; v = v->next_iv)
7428 if (v->ext_dependent)
7430 enum rtx_code code = GET_CODE (v->ext_dependent);
7443 /* We don't know whether this value is being used as either
7444 signed or unsigned, so to safely truncate we must satisfy
7445 both. The initial check here verifies the BIV itself;
7446 once that is successful we may check its range wrt the
7447 derived GIV. This works only if we were able to determine
7448 constant start and end values above. */
7449 if (se_ok && ze_ok && info_ok)
7451 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7452 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7454 /* We know from the above that both endpoints are nonnegative,
7455 and that there is no wrapping. Verify that both endpoints
7456 are within the (signed) range of the outer mode. */
7457 if (u_start_val <= max && u_end_val <= max)
7468 if (loop_dump_stream)
7470 fprintf (loop_dump_stream,
7471 "Verified ext dependent giv at %d of reg %d\n",
7472 INSN_UID (v->insn), bl->regno);
7477 if (loop_dump_stream)
7482 why = "biv iteration values overflowed";
7486 incr = biv_total_increment (bl);
7487 if (incr == const1_rtx)
7488 why = "biv iteration info incomplete; incr by 1";
7490 why = "biv iteration info incomplete";
7493 fprintf (loop_dump_stream,
7494 "Failed ext dependent giv at %d, %s\n",
7495 INSN_UID (v->insn), why);
7498 bl->all_reduced = 0;
7503 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7506 extend_value_for_giv (struct induction *v, rtx value)
7508 rtx ext_dep = v->ext_dependent;
7513 /* Recall that check_ext_dependent_givs verified that the known bounds
7514 of a biv did not overflow or wrap with respect to the extension for
7515 the giv. Therefore, constants need no additional adjustment. */
7516 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7519 /* Otherwise, we must adjust the value to compensate for the
7520 differing modes of the biv and the giv. */
7521 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7524 struct combine_givs_stats
7531 cmp_combine_givs_stats (const void *xp, const void *yp)
7533 const struct combine_givs_stats * const x =
7534 (const struct combine_givs_stats *) xp;
7535 const struct combine_givs_stats * const y =
7536 (const struct combine_givs_stats *) yp;
7538 d = y->total_benefit - x->total_benefit;
7539 /* Stabilize the sort. */
7541 d = x->giv_number - y->giv_number;
7545 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7546 any other. If so, point SAME to the giv combined with and set NEW_REG to
7547 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7548 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7551 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7553 /* Additional benefit to add for being combined multiple times. */
7554 const int extra_benefit = 3;
7556 struct induction *g1, *g2, **giv_array;
7557 int i, j, k, giv_count;
7558 struct combine_givs_stats *stats;
7561 /* Count givs, because bl->giv_count is incorrect here. */
7563 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7568 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7570 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7572 giv_array[i++] = g1;
7574 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7575 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7577 for (i = 0; i < giv_count; i++)
7583 stats[i].giv_number = i;
7585 /* If a DEST_REG GIV is used only once, do not allow it to combine
7586 with anything, for in doing so we will gain nothing that cannot
7587 be had by simply letting the GIV with which we would have combined
7588 to be reduced on its own. The lossage shows up in particular with
7589 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7590 be seen elsewhere as well. */
7591 if (g1->giv_type == DEST_REG
7592 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7593 && single_use != const0_rtx)
7596 this_benefit = g1->benefit;
7597 /* Add an additional weight for zero addends. */
7598 if (g1->no_const_addval)
7601 for (j = 0; j < giv_count; j++)
7607 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7609 can_combine[i * giv_count + j] = this_combine;
7610 this_benefit += g2->benefit + extra_benefit;
7613 stats[i].total_benefit = this_benefit;
7616 /* Iterate, combining until we can't. */
7618 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7620 if (loop_dump_stream)
7622 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7623 for (k = 0; k < giv_count; k++)
7625 g1 = giv_array[stats[k].giv_number];
7626 if (!g1->combined_with && !g1->same)
7627 fprintf (loop_dump_stream, " {%d, %d}",
7628 INSN_UID (giv_array[stats[k].giv_number]->insn),
7629 stats[k].total_benefit);
7631 putc ('\n', loop_dump_stream);
7634 for (k = 0; k < giv_count; k++)
7636 int g1_add_benefit = 0;
7638 i = stats[k].giv_number;
7641 /* If it has already been combined, skip. */
7642 if (g1->combined_with || g1->same)
7645 for (j = 0; j < giv_count; j++)
7648 if (g1 != g2 && can_combine[i * giv_count + j]
7649 /* If it has already been combined, skip. */
7650 && ! g2->same && ! g2->combined_with)
7654 g2->new_reg = can_combine[i * giv_count + j];
7656 /* For destination, we now may replace by mem expression instead
7657 of register. This changes the costs considerably, so add the compensation. */
7659 if (g2->giv_type == DEST_ADDR)
7660 g2->benefit = (g2->benefit + reg_address_cost
7661 - address_cost (g2->new_reg,
7662 GET_MODE (g2->mem)));
7663 g1->combined_with++;
7664 g1->lifetime += g2->lifetime;
7666 g1_add_benefit += g2->benefit;
7668 /* ??? The new final_[bg]iv_value code does a much better job
7669 of finding replaceable giv's, and hence this code may no
7670 longer be necessary. */
7671 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7672 g1_add_benefit -= copy_cost;
7674 /* To help optimize the next set of combinations, remove
7675 this giv from the benefits of other potential mates. */
7676 for (l = 0; l < giv_count; ++l)
7678 int m = stats[l].giv_number;
7679 if (can_combine[m * giv_count + j])
7680 stats[l].total_benefit -= g2->benefit + extra_benefit;
7683 if (loop_dump_stream)
7684 fprintf (loop_dump_stream,
7685 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7686 INSN_UID (g2->insn), INSN_UID (g1->insn),
7687 g1->benefit, g1_add_benefit, g1->lifetime);
7691 /* To help optimize the next set of combinations, remove
7692 this giv from the benefits of other potential mates. */
7693 if (g1->combined_with)
7695 for (j = 0; j < giv_count; ++j)
7697 int m = stats[j].giv_number;
7698 if (can_combine[m * giv_count + i])
7699 stats[j].total_benefit -= g1->benefit + extra_benefit;
7702 g1->benefit += g1_add_benefit;
7704 /* We've finished with this giv, and everything it touched.
7705 Restart the combination so that the weights for the
7706 rest of the givs are properly taken into account.
7707 /* ??? Ideally we would compact the arrays at this point, so
7708 as to not cover old ground. But sanely compacting
7709 can_combine is tricky. */
7719 /* Generate sequence for REG = B * M + A. B is the initial value of
7720 the basic induction variable, M a multiplicative constant, A an
7721 additive constant and REG the destination register. */
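/* A minimal sketch (made-up operands) of what expand_mult_add may
   produce for REG = B * M + A when M is a small constant: shift/add
   arithmetic rather than an actual multiply insn.  */
static long
add_mult_example (long b)
{
  return (b << 2) + 100;  /* REG = B * 4 + 100, multiply-free */
}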
7724 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7730 /* Use unsigned arithmetic. */
7731 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7733 emit_move_insn (reg, result);
7741 /* Update registers created in insn sequence SEQ. */
7744 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7748 /* Update register info for alias analysis. */
7750 if (seq == NULL_RTX)
7756 while (insn != NULL_RTX)
7758 rtx set = single_set (insn);
7760 if (set && GET_CODE (SET_DEST (set)) == REG)
7761 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7763 insn = NEXT_INSN (insn);
7766 else if (GET_CODE (seq) == SET
7767 && GET_CODE (SET_DEST (seq)) == REG)
7768 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7772 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7773 is the initial value of the basic induction variable, M a
7774 multiplicative constant, A an additive constant and REG the
7775 destination register. */
7778 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7779 rtx reg, basic_block before_bb, rtx before_insn)
7785 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7789 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7790 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7792 /* Increase the lifetime of any invariants moved further in code. */
7793 update_reg_last_use (a, before_insn);
7794 update_reg_last_use (b, before_insn);
7795 update_reg_last_use (m, before_insn);
7797 /* It is possible that the expansion created lots of new registers.
7798 Iterate over the sequence we just created and record them all. We
7799 must do this before inserting the sequence. */
7800 loop_regs_update (loop, seq);
7802 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7806 /* Emit insns in loop pre-header to set REG = B * M + A. B is the
7807 initial value of the basic induction variable, M a multiplicative
7808 constant, A an additive constant and REG the destination
7812 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7816 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7817 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7819 /* Increase the lifetime of any invariants moved further in code.
7820 ???? Is this really necessary? */
7821 update_reg_last_use (a, loop->sink);
7822 update_reg_last_use (b, loop->sink);
7823 update_reg_last_use (m, loop->sink);
7825 /* It is possible that the expansion created lots of new registers.
7826 Iterate over the sequence we just created and record them all. We
7827 must do this before inserting the sequence. */
7828 loop_regs_update (loop, seq);
7830 loop_insn_sink (loop, seq);
7834 /* Emit insns after loop to set REG = B * M + A. B is the initial
7835 value of the basic induction variable, M a multiplicative constant,
7836 A an additive constant and REG the destination register. */
7839 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7843 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7844 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7846 /* It is possible that the expansion created lots of new registers.
7847 Iterate over the sequence we just created and record them all. We
7848 must do this before inserting the sequence. */
7849 loop_regs_update (loop, seq);
7851 loop_insn_hoist (loop, seq);
7856 /* Similar to gen_add_mult, but compute the cost rather than generating the sequence. */
7860 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7866 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7868 emit_move_insn (reg, result);
7869 last = get_last_insn ();
7872 rtx t = single_set (last);
7874 cost += rtx_cost (SET_SRC (t), SET);
7875 last = PREV_INSN (last);
7881 /* Test whether A * B can be computed without
7882 an actual multiply insn. Value is 1 if so.
7884 ??? This function stinks because it generates a ton of wasted RTL
7885 ??? and as a result fragments GC memory to no end. There are other
7886 ??? places in the compiler which are invoked a lot and do the same
7887 ??? thing, generate wasted RTL just to see if something is possible. */
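/* Sketch of the kind of cheap product the test below hopes to find
   (illustrative): a constant multiplier commonly expands to a short
   shift/add sequence, so no multiply insn or libcall is needed.  */
static long
mult_by_ten_example (long x)
{
  return (x << 3) + (x << 1);  /* x * 10 without a multiply insn */
}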
7890 product_cheap_p (rtx a, rtx b)
7895 /* If only one is constant, make it B. */
7896 if (GET_CODE (a) == CONST_INT)
7897 tmp = a, a = b, b = tmp;
7899 /* If first constant, both constant, so don't need multiply. */
7900 if (GET_CODE (a) == CONST_INT)
7903 /* If second not constant, neither is constant, so would need multiply. */
7904 if (GET_CODE (b) != CONST_INT)
7907 /* One operand is constant, so might not need multiply insn. Generate the
7908 code for the multiply and see if a call or multiply, or long sequence
7909 of insns is generated. */
7912 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7920 while (tmp != NULL_RTX)
7922 rtx next = NEXT_INSN (tmp);
7925 || GET_CODE (tmp) != INSN
7926 || (GET_CODE (PATTERN (tmp)) == SET
7927 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7928 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7929 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7930 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7939 else if (GET_CODE (tmp) == SET
7940 && GET_CODE (SET_SRC (tmp)) == MULT)
7942 else if (GET_CODE (tmp) == PARALLEL
7943 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7944 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7950 /* Check to see if loop can be terminated by a "decrement and branch until
7951 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7952 Also try reversing an increment loop to a decrement loop
7953 to see if the optimization can be performed.
7954 Value is nonzero if optimization was performed. */
7956 /* This is useful even if the architecture doesn't have such an insn,
7957 because it might change a loop which increments from 0 to n to a loop
7958 which decrements from n to 0. A loop that decrements to zero is usually
7959 faster than one that increments from zero. */
7961 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7962 such as approx_final_value, biv_total_increment, loop_iterations, and
7963 final_[bg]iv_value. */
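/* Illustrative source-level view of the reversal (not compiler code;
   loop_body_hypothetical is a stand-in for the loop's real work): when
   `i' is used only to count iterations, an increasing loop can be
   rewritten to count down toward zero, enabling a decrement-and-branch
   insn and a cheaper comparison against zero.  */
extern void loop_body_hypothetical (void);

static void
loop_reversal_example (int n)
{
  int i;
  for (i = 0; i < n; i++)  /* before: count up, compare against n */
    loop_body_hypothetical ();
  for (i = n; i > 0; i--)  /* after: count down, compare against 0 */
    loop_body_hypothetical ();
}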
7966 check_dbra_loop (struct loop *loop, int insn_count)
7968 struct loop_info *loop_info = LOOP_INFO (loop);
7969 struct loop_regs *regs = LOOP_REGS (loop);
7970 struct loop_ivs *ivs = LOOP_IVS (loop);
7971 struct iv_class *bl;
7978 rtx before_comparison;
7982 int compare_and_branch;
7983 rtx loop_start = loop->start;
7984 rtx loop_end = loop->end;
7986 /* If last insn is a conditional branch, and the insn before tests a
7987 register value, try to optimize it. Otherwise, we can't do anything. */
7989 jump = PREV_INSN (loop_end);
7990 comparison = get_condition_for_loop (loop, jump);
7991 if (comparison == 0)
7993 if (!onlyjump_p (jump))
7996 /* Try to compute whether the compare/branch at the loop end is one or
7997 two instructions. */
7998 get_condition (jump, &first_compare);
7999 if (first_compare == jump)
8000 compare_and_branch = 1;
8001 else if (first_compare == prev_nonnote_insn (jump))
8002 compare_and_branch = 2;
8007 /* If more than one condition is present to control the loop, then
8008 do not proceed, as this function does not know how to rewrite
8009 loop tests with more than one condition.
8011 Look backwards from the first insn in the last comparison
8012 sequence and see if we've got another comparison sequence. */
8015 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8016 if (GET_CODE (jump1) == JUMP_INSN)
8020 /* Check all of the bivs to see if the compare uses one of them.
8021 Skip biv's set more than once because we can't guarantee that
8022 it will be zero on the last iteration. Also skip if the biv is
8023 used between its update and the test insn. */
8025 for (bl = ivs->list; bl; bl = bl->next)
8027 if (bl->biv_count == 1
8028 && ! bl->biv->maybe_multiple
8029 && bl->biv->dest_reg == XEXP (comparison, 0)
8030 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8038 /* Look for the case where the basic induction variable is always
8039 nonnegative, and equals zero on the last iteration.
8040 In this case, add a reg_note REG_NONNEG, which allows the
8041 m68k DBRA instruction to be used. */
8043 if (((GET_CODE (comparison) == GT
8044 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8045 && INTVAL (XEXP (comparison, 1)) == -1)
8046 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8047 && GET_CODE (bl->biv->add_val) == CONST_INT
8048 && INTVAL (bl->biv->add_val) < 0)
8050 /* Initial value must be greater than 0, and
8051 init_val % -dec_value == 0, to ensure that it equals zero on
8052 the last iteration. */
8054 if (GET_CODE (bl->initial_value) == CONST_INT
8055 && INTVAL (bl->initial_value) > 0
8056 && (INTVAL (bl->initial_value)
8057 % (-INTVAL (bl->biv->add_val))) == 0)
8059 /* register always nonnegative, add REG_NOTE to branch */
8060 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8062 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8069 /* If the decrement is 1 and the value was tested as >= 0 before
8070 the loop, then we can safely optimize. */
8071 for (p = loop_start; p; p = PREV_INSN (p))
8073 if (GET_CODE (p) == CODE_LABEL)
8075 if (GET_CODE (p) != JUMP_INSN)
8078 before_comparison = get_condition_for_loop (loop, p);
8079 if (before_comparison
8080 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8081 && GET_CODE (before_comparison) == LT
8082 && XEXP (before_comparison, 1) == const0_rtx
8083 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8084 && INTVAL (bl->biv->add_val) == -1)
8086 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8088 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8096 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8097 && INTVAL (bl->biv->add_val) > 0)
8099 /* Try to change inc to dec, so can apply above optimization. */
8100 /* Can do this if:
8101 all registers modified are induction variables or invariant,
8102 all memory references have non-overlapping addresses
8103 (obviously true if only one write)
8104 allow 2 insns for the compare/jump at the end of the loop. */
8105 /* Also, we must avoid any instructions which use both the reversed
8106 biv and another biv. Such instructions will fail if the loop is
8107 reversed. We meet this condition by requiring that either
8108 no_use_except_counting is true, or else that there is only one biv. */
8110 int num_nonfixed_reads = 0;
8111 /* 1 if the iteration var is used only to count iterations. */
8112 int no_use_except_counting = 0;
8113 /* 1 if the loop has no memory store, or it has a single memory store
8114 which is reversible. */
8115 int reversible_mem_store = 1;
8117 if (bl->giv_count == 0
8118 && !loop->exit_count
8119 && !loop_info->has_multiple_exit_targets)
8121 rtx bivreg = regno_reg_rtx[bl->regno];
8122 struct iv_class *blt;
8124 /* If there are no givs for this biv, and the only exit is the
8125 fall through at the end of the loop, then
8126 see if perhaps there are no uses except to count. */
8127 no_use_except_counting = 1;
8128 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8131 rtx set = single_set (p);
8133 if (set && GET_CODE (SET_DEST (set)) == REG
8134 && REGNO (SET_DEST (set)) == bl->regno)
8135 /* An insn that sets the biv is okay. */
8137 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8138 /* An insn that doesn't mention the biv is okay. */
8140 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8141 || p == prev_nonnote_insn (loop_end))
8143 /* If either of these insns uses the biv and sets a pseudo
8144 that has more than one usage, then the biv has uses
8145 other than counting since it's used to derive a value
8146 that is used more than one time. */
8147 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8149 if (regs->multiple_uses)
8151 no_use_except_counting = 0;
8157 no_use_except_counting = 0;
8162 /* A biv has uses besides counting if it is used to set another biv. */
8164 for (blt = ivs->list; blt; blt = blt->next)
8166 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8168 no_use_except_counting = 0;
8173 if (no_use_except_counting)
8174 /* No need to worry about MEMs. */
8176 else if (loop_info->num_mem_sets <= 1)
8178 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8180 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8182 /* If the loop has a single store, and the destination address is
8183 invariant, then we can't reverse the loop, because this address
8184 might then have the wrong value at loop exit.
8185 This would work if the source was invariant also, however, in that
8186 case, the insn should have been moved out of the loop. */
8188 if (loop_info->num_mem_sets == 1)
8190 struct induction *v;
8192 /* If we could prove that each of the memory locations
8193 written to was different, then we could reverse the
8194 store -- but we don't presently have any way of knowing that. */
8196 reversible_mem_store = 0;
8198 /* If the store depends on a register that is set after the
8199 store, it depends on the initial value, and is thus not reversible. */
8201 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8203 if (v->giv_type == DEST_REG
8204 && reg_mentioned_p (v->dest_reg,
8205 PATTERN (loop_info->first_loop_store_insn))
8206 && loop_insn_first_p (loop_info->first_loop_store_insn,
8208 reversible_mem_store = 0;
8215 /* This code only acts for innermost loops. Also it simplifies
8216 the memory address check by only reversing loops with
8217 zero or one memory access.
8218 Two memory accesses could involve parts of the same array,
8219 and that can't be reversed.
8220 If the biv is used only for counting, then we don't need to worry
8221 about all these things. */
8223 if ((num_nonfixed_reads <= 1
8224 && ! loop_info->has_nonconst_call
8225 && ! loop_info->has_prefetch
8226 && ! loop_info->has_volatile
8227 && reversible_mem_store
8228 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8229 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8230 && (bl == ivs->list && bl->next == 0))
8231 || (no_use_except_counting && ! loop_info->has_prefetch))
8235 /* Loop can be reversed. */
8236 if (loop_dump_stream)
8237 fprintf (loop_dump_stream, "Can reverse loop\n");
8239 /* Now check other conditions:
8241 The increment must be a constant, as must the initial value,
8242 and the comparison code must be LT.
8244 This test can probably be improved since +/- 1 in the constant
8245 can be obtained by changing LT to LE and vice versa; this is confusing. */
8249 /* for constants, LE gets turned into LT */
8250 && (GET_CODE (comparison) == LT
8251 || (GET_CODE (comparison) == LE
8252 && no_use_except_counting)))
8254 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8255 rtx initial_value, comparison_value;
8257 enum rtx_code cmp_code;
8258 int comparison_const_width;
8259 unsigned HOST_WIDE_INT comparison_sign_mask;
8261 add_val = INTVAL (bl->biv->add_val);
8262 comparison_value = XEXP (comparison, 1);
8263 if (GET_MODE (comparison_value) == VOIDmode)
8264 comparison_const_width
8265 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8267 comparison_const_width
8268 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8269 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8270 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8271 comparison_sign_mask
8272 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8274 /* If the comparison value is not a loop invariant, then we
8275 can not reverse this loop.
8277 ??? If the insns which initialize the comparison value as
8278 a whole compute an invariant result, then we could move
8279 them out of the loop and proceed with loop reversal. */
8280 if (! loop_invariant_p (loop, comparison_value))
8283 if (GET_CODE (comparison_value) == CONST_INT)
8284 comparison_val = INTVAL (comparison_value);
8285 initial_value = bl->initial_value;
8287 /* Normalize the initial value if it is an integer and
8288 has no other use except as a counter. This will allow
8289 a few more loops to be reversed. */
8290 if (no_use_except_counting
8291 && GET_CODE (comparison_value) == CONST_INT
8292 && GET_CODE (initial_value) == CONST_INT)
8294 comparison_val = comparison_val - INTVAL (bl->initial_value);
8295 /* The code below requires comparison_val to be a multiple
8296 of add_val in order to do the loop reversal, so
8297 round up comparison_val to a multiple of add_val.
8298 Since comparison_value is constant, we know that the
8299 current comparison code is LT. */
8300 comparison_val = comparison_val + add_val - 1;
comparison_val -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8303 /* We postpone overflow checks for COMPARISON_VAL here;
8304 even if there is an overflow, we might still be able to
reverse the loop, if converting the loop exit test to
NE is possible.  */
8307 initial_value = const0_rtx;
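/* Worked example of the normalization above (illustrative numbers):
   with initial value 5, comparison value 17 and add_val 4,
   comparison_val becomes 17 - 5 = 12; rounding up gives
   12 + (4 - 1) = 15, and 15 - 15 % 4 = 12.  The normalized loop
   counts 0, 4, 8 (all < 12), i.e. three iterations, exactly like
   the original 5, 9, 13 (all < 17).  */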
8310 /* First check if we can do a vanilla loop reversal. */
8311 if (initial_value == const0_rtx
8312 /* If we have a decrement_and_branch_on_count,
8313 prefer the NE test, since this will allow that
8314 instruction to be generated. Note that we must
8315 use a vanilla loop reversal if the biv is used to
8316 calculate a giv or has a non-counting use. */
8317 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8318 && defined (HAVE_decrement_and_branch_on_count)
8319 && (! (add_val == 1 && loop->vtop
8320 && (bl->biv_count == 0
8321 || no_use_except_counting)))
8323 && GET_CODE (comparison_value) == CONST_INT
8324 /* Now do postponed overflow checks on COMPARISON_VAL. */
8325 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8326 & comparison_sign_mask))
8328 /* Register will always be nonnegative, with value
8329 0 on last iteration */
8330 add_adjust = add_val;
8334 else if (add_val == 1 && loop->vtop
8335 && (bl->biv_count == 0
8336 || no_use_except_counting))
8344 if (GET_CODE (comparison) == LE)
8345 add_adjust -= add_val;
8347 /* If the initial value is not zero, or if the comparison
8348 value is not an exact multiple of the increment, then we
8349 can not reverse this loop. */
8350 if (initial_value == const0_rtx
8351 && GET_CODE (comparison_value) == CONST_INT)
8353 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8358 if (! no_use_except_counting || add_val != 1)
8362 final_value = comparison_value;
8364 /* Reset these in case we normalized the initial value
8365 and comparison value above. */
8366 if (GET_CODE (comparison_value) == CONST_INT
8367 && GET_CODE (initial_value) == CONST_INT)
8369 comparison_value = GEN_INT (comparison_val);
final_value
  = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8373 bl->initial_value = initial_value;
8375 /* Save some info needed to produce the new insns. */
8376 reg = bl->biv->dest_reg;
8377 jump_label = condjump_label (PREV_INSN (loop_end));
8378 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
/* Set start_value; if this is not a CONST_INT, we need
   to generate a SUB.
8382 Initialize biv to start_value before loop start.
8383 The old initializing insn will be deleted as a
8384 dead store by flow.c. */
8385 if (initial_value == const0_rtx
8386 && GET_CODE (comparison_value) == CONST_INT)
8388 start_value = GEN_INT (comparison_val - add_adjust);
8389 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8391 else if (GET_CODE (initial_value) == CONST_INT)
8393 enum machine_mode mode = GET_MODE (reg);
8394 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8395 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
start_value
  = gen_rtx_PLUS (mode, comparison_value, offset);
8402 loop_insn_hoist (loop, add_insn);
8403 if (GET_CODE (comparison) == LE)
8404 final_value = gen_rtx_PLUS (mode, comparison_value,
8407 else if (! add_adjust)
8409 enum machine_mode mode = GET_MODE (reg);
8410 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
start_value
  = gen_rtx_MINUS (mode, comparison_value, initial_value);
8417 loop_insn_hoist (loop, sub_insn);
8420 /* We could handle the other cases too, but it'll be
8421 better to have a testcase first. */
8424 /* We may not have a single insn which can increment a reg, so
8425 create a sequence to hold all the insns from expand_inc. */
8427 expand_inc (reg, new_add_val);
8431 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8432 delete_insn (bl->biv->insn);
8434 /* Update biv info to reflect its new status. */
8436 bl->initial_value = start_value;
8437 bl->biv->add_val = new_add_val;
8439 /* Update loop info. */
8440 loop_info->initial_value = reg;
8441 loop_info->initial_equiv_value = reg;
8442 loop_info->final_value = const0_rtx;
8443 loop_info->final_equiv_value = const0_rtx;
8444 loop_info->comparison_value = const0_rtx;
8445 loop_info->comparison_code = cmp_code;
8446 loop_info->increment = new_add_val;
8448 /* Inc LABEL_NUSES so that delete_insn will
8449 not delete the label. */
8450 LABEL_NUSES (XEXP (jump_label, 0))++;
8452 /* Emit an insn after the end of the loop to set the biv's
8453 proper exit value if it is used anywhere outside the loop. */
8454 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8456 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8457 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8459 /* Delete compare/branch at end of loop. */
8460 delete_related_insns (PREV_INSN (loop_end));
8461 if (compare_and_branch == 2)
8462 delete_related_insns (first_compare);
8464 /* Add new compare/branch insn at end of loop. */
8466 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8468 XEXP (jump_label, 0));
8471 emit_jump_insn_before (tem, loop_end);
8473 for (tem = PREV_INSN (loop_end);
8474 tem && GET_CODE (tem) != JUMP_INSN;
8475 tem = PREV_INSN (tem))
8479 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8485 /* Increment of LABEL_NUSES done above. */
8486 /* Register is now always nonnegative,
8487 so add REG_NONNEG note to the branch. */
8488 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8494 /* No insn may reference both the reversed and another biv or it
will fail (see comment near the top of the loop reversal
code).
8497 Earlier on, we have verified that the biv has no use except
8498 counting, or it is the only biv in this function.
8499 However, the code that computes no_use_except_counting does
8500 not verify reg notes. It's possible to have an insn that
8501 references another biv, and has a REG_EQUAL note with an
8502 expression based on the reversed biv. To avoid this case,
remove all REG_EQUAL notes based on the reversed biv
here.  */
8505 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8509 rtx set = single_set (p);
8510 /* If this is a set of a GIV based on the reversed biv, any
8511 REG_EQUAL notes should still be correct. */
if (! set
    || GET_CODE (SET_DEST (set)) != REG
8514 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8515 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8516 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
for (pnote = &REG_NOTES (p); *pnote;)
8519 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8520 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8522 *pnote = XEXP (*pnote, 1);
8524 pnote = &XEXP (*pnote, 1);
8528 /* Mark that this biv has been reversed. Each giv which depends
8529 on this biv, and which is also live past the end of the loop
8530 will have to be fixed up. */
8534 if (loop_dump_stream)
8536 fprintf (loop_dump_stream, "Reversed loop");
8538 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8540 fprintf (loop_dump_stream, "\n");
8551 /* Verify whether the biv BL appears to be eliminable,
8552 based on the insns in the loop that refer to it.
8554 If ELIMINATE_P is nonzero, actually do the elimination.
8556 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8557 determine whether invariant insns should be placed inside or at the
8558 start of the loop. */
8561 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8562 int eliminate_p, int threshold, int insn_count)
8564 struct loop_ivs *ivs = LOOP_IVS (loop);
8565 rtx reg = bl->biv->dest_reg;
8568 /* Scan all insns in the loop, stopping if we find one that uses the
8569 biv in a way that we cannot eliminate. */
8571 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8573 enum rtx_code code = GET_CODE (p);
8574 basic_block where_bb = 0;
8575 rtx where_insn = threshold >= insn_count ? 0 : p;
8578 /* If this is a libcall that sets a giv, skip ahead to its end. */
8579 if (GET_RTX_CLASS (code) == 'i')
8581 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8585 rtx last = XEXP (note, 0);
8586 rtx set = single_set (last);
8588 if (set && GET_CODE (SET_DEST (set)) == REG)
8590 unsigned int regno = REGNO (SET_DEST (set));
8592 if (regno < ivs->n_regs
8593 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8594 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8600 /* Closely examine the insn if the biv is mentioned. */
8601 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8602 && reg_mentioned_p (reg, PATTERN (p))
8603 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8604 eliminate_p, where_bb, where_insn))
8606 if (loop_dump_stream)
8607 fprintf (loop_dump_stream,
8608 "Cannot eliminate biv %d: biv used in insn %d.\n",
8609 bl->regno, INSN_UID (p));
8613 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8615 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8616 && reg_mentioned_p (reg, XEXP (note, 0)))
8617 remove_note (p, note);
8622 if (loop_dump_stream)
8623 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8624 bl->regno, eliminate_p ? "was" : "can be");
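/* Illustrative example (not from the original sources) of what biv
   elimination buys us: in

	for (i = 0; i < 100; i++)
	  a[i] = 0;

   once the address giv &a[i] has been strength-reduced to a pointer p,
   the only remaining use of the biv i is the exit test, so the test
   can be rewritten as p < &a[100] and i removed entirely.  */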
8631 /* INSN and REFERENCE are instructions in the same insn chain.
8632 Return nonzero if INSN is first. */
8635 loop_insn_first_p (rtx insn, rtx reference)
8639 for (p = insn, q = reference;;)
/* Start with test for not first so that INSN == REFERENCE yields not
   first.  */
8643 if (q == insn || ! p)
8645 if (p == reference || ! q)
8648 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
previous insn, hence the <= comparison below does not work if
P is a note.  */
8651 if (INSN_UID (p) < max_uid_for_loop
8652 && INSN_UID (q) < max_uid_for_loop
8653 && GET_CODE (p) != NOTE)
8654 return INSN_LUID (p) <= INSN_LUID (q);
8656 if (INSN_UID (p) >= max_uid_for_loop
8657 || GET_CODE (p) == NOTE)
8659 if (INSN_UID (q) >= max_uid_for_loop)
8664 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8665 the offset that we have to take into account due to auto-increment /
biv combination is zero.  */
8668 biv_elimination_giv_has_0_offset (struct induction *biv,
8669 struct induction *giv, rtx insn)
8671 /* If the giv V had the auto-inc address optimization applied
8672 to it, and INSN occurs between the giv insn and the biv
8673 insn, then we'd have to adjust the value used here.
8674 This is rare, so we don't bother to make this possible. */
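/* Illustrative timeline of the rare case rejected below: if the insns
   appear in the order

	giv->insn ... INSN ... biv->insn

   (or the mirror image), then at INSN the auto-incremented giv differs
   from its nominal value by one increment step, so using it there
   would need an offset adjustment that we do not perform.  */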
8675 if (giv->auto_inc_opt
8676 && ((loop_insn_first_p (giv->insn, insn)
8677 && loop_insn_first_p (insn, biv->insn))
8678 || (loop_insn_first_p (biv->insn, insn)
8679 && loop_insn_first_p (insn, giv->insn))))
8685 /* If BL appears in X (part of the pattern of INSN), see if we can
8686 eliminate its use. If so, return 1. If not, return 0.
8688 If BIV does not appear in X, return 1.
8690 If ELIMINATE_P is nonzero, actually do the elimination.
8691 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8692 Depending on how many items have been moved out of the loop, it
8693 will either be before INSN (when WHERE_INSN is nonzero) or at the
8694 start of the loop (when WHERE_INSN is zero). */
8697 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8698 struct iv_class *bl, int eliminate_p,
8699 basic_block where_bb, rtx where_insn)
8701 enum rtx_code code = GET_CODE (x);
8702 rtx reg = bl->biv->dest_reg;
8703 enum machine_mode mode = GET_MODE (reg);
8704 struct induction *v;
8716 /* If we haven't already been able to do something with this BIV,
8717 we can't eliminate it. */
8723 /* If this sets the BIV, it is not a problem. */
8724 if (SET_DEST (x) == reg)
8727 /* If this is an insn that defines a giv, it is also ok because
8728 it will go away when the giv is reduced. */
8729 for (v = bl->giv; v; v = v->next_iv)
8730 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8734 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8736 /* Can replace with any giv that was reduced and
8737 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8738 Require a constant for MULT_VAL, so we know it's nonzero.
??? We disable this optimization to avoid potential
overflows.  */
8742 for (v = bl->giv; v; v = v->next_iv)
8743 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8744 && v->add_val == const0_rtx
8745 && ! v->ignore && ! v->maybe_dead && v->always_computable
8749 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8755 /* If the giv has the opposite direction of change,
8756 then reverse the comparison. */
8757 if (INTVAL (v->mult_val) < 0)
8758 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8759 const0_rtx, v->new_reg);
8763 /* We can probably test that giv's reduced reg. */
8764 if (validate_change (insn, &SET_SRC (x), new, 0))
8768 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8769 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8770 Require a constant for MULT_VAL, so we know it's nonzero.
8771 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8772 overflow problem. */
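/* For instance (illustrative): if the giv is g = 4 * biv + base with
   base a pointer, then testing the biv against zero is equivalent to
   testing g against base, so the test insn can be replaced by
   (compare g base) on the reduced giv register.  */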
8774 for (v = bl->giv; v; v = v->next_iv)
8775 if (GET_CODE (v->mult_val) == CONST_INT
8776 && v->mult_val != const0_rtx
8777 && ! v->ignore && ! v->maybe_dead && v->always_computable
8779 && (GET_CODE (v->add_val) == SYMBOL_REF
8780 || GET_CODE (v->add_val) == LABEL_REF
8781 || GET_CODE (v->add_val) == CONST
8782 || (GET_CODE (v->add_val) == REG
8783 && REG_POINTER (v->add_val))))
8785 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8791 /* If the giv has the opposite direction of change,
8792 then reverse the comparison. */
8793 if (INTVAL (v->mult_val) < 0)
8794 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8797 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8798 copy_rtx (v->add_val));
8800 /* Replace biv with the giv's reduced register. */
8801 update_reg_last_use (v->add_val, insn);
8802 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8805 /* Insn doesn't support that constant or invariant. Copy it
8806 into a register (it will be a loop invariant.) */
8807 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8809 loop_insn_emit_before (loop, 0, where_insn,
8811 copy_rtx (v->add_val)));
8813 /* Substitute the new register for its invariant value in
8814 the compare expression. */
8815 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8816 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8825 case GT: case GE: case GTU: case GEU:
8826 case LT: case LE: case LTU: case LEU:
8827 /* See if either argument is the biv. */
8828 if (XEXP (x, 0) == reg)
8829 arg = XEXP (x, 1), arg_operand = 1;
8830 else if (XEXP (x, 1) == reg)
8831 arg = XEXP (x, 0), arg_operand = 0;
8835 if (CONSTANT_P (arg))
8837 /* First try to replace with any giv that has constant positive
8838 mult_val and constant add_val. We might be able to support
8839 negative mult_val, but it seems complex to do it in general. */
8841 for (v = bl->giv; v; v = v->next_iv)
8842 if (GET_CODE (v->mult_val) == CONST_INT
8843 && INTVAL (v->mult_val) > 0
8844 && (GET_CODE (v->add_val) == SYMBOL_REF
8845 || GET_CODE (v->add_val) == LABEL_REF
8846 || GET_CODE (v->add_val) == CONST
8847 || (GET_CODE (v->add_val) == REG
8848 && REG_POINTER (v->add_val)))
8849 && ! v->ignore && ! v->maybe_dead && v->always_computable
8852 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8855 /* Don't eliminate if the linear combination that makes up
8856 the giv overflows when it is applied to ARG. */
8857 if (GET_CODE (arg) == CONST_INT)
8861 if (GET_CODE (v->add_val) == CONST_INT)
8862 add_val = v->add_val;
8864 add_val = const0_rtx;
8866 if (const_mult_add_overflow_p (arg, v->mult_val,
8874 /* Replace biv with the giv's reduced reg. */
8875 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8877 /* If all constants are actually constant integers and
the derived constant can be directly placed in the COMPARE,
do so.  */
8880 if (GET_CODE (arg) == CONST_INT
8881 && GET_CODE (v->add_val) == CONST_INT)
8883 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8884 v->add_val, mode, 1);
8888 /* Otherwise, load it into a register. */
8889 tem = gen_reg_rtx (mode);
8890 loop_iv_add_mult_emit_before (loop, arg,
8891 v->mult_val, v->add_val,
8892 tem, where_bb, where_insn);
8895 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8897 if (apply_change_group ())
8901 /* Look for giv with positive constant mult_val and nonconst add_val.
8902 Insert insns to calculate new compare value.
8903 ??? Turn this off due to possible overflow. */
8905 for (v = bl->giv; v; v = v->next_iv)
8906 if (GET_CODE (v->mult_val) == CONST_INT
8907 && INTVAL (v->mult_val) > 0
8908 && ! v->ignore && ! v->maybe_dead && v->always_computable
8914 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8920 tem = gen_reg_rtx (mode);
8922 /* Replace biv with giv's reduced register. */
8923 validate_change (insn, &XEXP (x, 1 - arg_operand),
8926 /* Compute value to compare against. */
8927 loop_iv_add_mult_emit_before (loop, arg,
8928 v->mult_val, v->add_val,
8929 tem, where_bb, where_insn);
8930 /* Use it in this insn. */
8931 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8932 if (apply_change_group ())
8936 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8938 if (loop_invariant_p (loop, arg) == 1)
8940 /* Look for giv with constant positive mult_val and nonconst
8941 add_val. Insert insns to compute new compare value.
8942 ??? Turn this off due to possible overflow. */
8944 for (v = bl->giv; v; v = v->next_iv)
8945 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8946 && ! v->ignore && ! v->maybe_dead && v->always_computable
8952 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8958 tem = gen_reg_rtx (mode);
8960 /* Replace biv with giv's reduced register. */
8961 validate_change (insn, &XEXP (x, 1 - arg_operand),
8964 /* Compute value to compare against. */
8965 loop_iv_add_mult_emit_before (loop, arg,
8966 v->mult_val, v->add_val,
8967 tem, where_bb, where_insn);
8968 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8969 if (apply_change_group ())
/* This code has problems.  Basically, while deciding whether we
   will eliminate BL, we can't know whether a particular giv of ARG
   will be reduced.  If it isn't going to be reduced,
8977 we can't eliminate BL. We can try forcing it to be reduced,
8978 but that can generate poor code.
The problem is that the benefit of reducing TV, below, should
8981 be increased if BL can actually be eliminated, but this means
8982 we might have to do a topological sort of the order in which
we try to process bivs.  It doesn't seem worthwhile to do
8984 this sort of thing now. */
8987 /* Otherwise the reg compared with had better be a biv. */
8988 if (GET_CODE (arg) != REG
8989 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8992 /* Look for a pair of givs, one for each biv,
8993 with identical coefficients. */
8994 for (v = bl->giv; v; v = v->next_iv)
8996 struct induction *tv;
8998 if (v->ignore || v->maybe_dead || v->mode != mode)
9001 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9003 if (! tv->ignore && ! tv->maybe_dead
9004 && rtx_equal_p (tv->mult_val, v->mult_val)
9005 && rtx_equal_p (tv->add_val, v->add_val)
9006 && tv->mode == mode)
9008 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9014 /* Replace biv with its giv's reduced reg. */
9015 XEXP (x, 1 - arg_operand) = v->new_reg;
/* Replace other operand with the other giv's
   reduced reg.  */
9018 XEXP (x, arg_operand) = tv->new_reg;
9025 /* If we get here, the biv can't be eliminated. */
9029 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9030 biv is used in it, since it will be replaced. */
9031 for (v = bl->giv; v; v = v->next_iv)
9032 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9040 /* See if any subexpression fails elimination. */
9041 fmt = GET_RTX_FORMAT (code);
9042 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9047 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9048 eliminate_p, where_bb, where_insn))
9053 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9054 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9055 eliminate_p, where_bb, where_insn))
9064 /* Return nonzero if the last use of REG
9065 is in an insn following INSN in the same basic block. */
9068 last_use_this_basic_block (rtx reg, rtx insn)
rtx n;
for (n = insn;
     n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9075 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9081 /* Called via `note_stores' to record the initial value of a biv. Here we
9082 just record the location of the set and process it later. */
9085 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9087 struct loop_ivs *ivs = (struct loop_ivs *) data;
9088 struct iv_class *bl;
9090 if (GET_CODE (dest) != REG
9091 || REGNO (dest) >= ivs->n_regs
9092 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9095 bl = REG_IV_CLASS (ivs, REGNO (dest));
9097 /* If this is the first set found, record it. */
9098 if (bl->init_insn == 0)
9100 bl->init_insn = note_insn;
9105 /* If any of the registers in X are "old" and currently have a last use earlier
9106 than INSN, update them to have a last use of INSN. Their actual last use
9107 will be the previous insn but it will not have a valid uid_luid so we can't
9108 use it. X must be a source expression only. */
9111 update_reg_last_use (rtx x, rtx insn)
9113 /* Check for the case where INSN does not have a valid luid. In this case,
9114 there is no need to modify the regno_last_uid, as this can only happen
9115 when code is inserted after the loop_end to set a pseudo's final value,
9116 and hence this insn will never be the last use of x.
9117 ???? This comment is not correct. See for example loop_givs_reduce.
9118 This may insert an insn before another new insn. */
9119 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9120 && INSN_UID (insn) < max_uid_for_loop
9121 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9123 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9128 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9129 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9132 update_reg_last_use (XEXP (x, i), insn);
9133 else if (fmt[i] == 'E')
9134 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9135 update_reg_last_use (XVECEXP (x, i, j), insn);
9140 /* Given an insn INSN and condition COND, return the condition in a
9141 canonical form to simplify testing by callers. Specifically:
9143 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9144 (2) Both operands will be machine operands; (cc0) will have been replaced.
9145 (3) If an operand is a constant, it will be the second operand.
9146 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9147 for GE, GEU, and LEU.
9149 If the condition cannot be understood, or is an inequality floating-point
9150 comparison which needs to be reversed, 0 will be returned.
9152 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
9154 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9155 insn used in locating the condition was found. If a replacement test
9156 of the condition is desired, it should be placed in front of that
9157 insn and we will be sure that the inputs are still valid.
9159 If WANT_REG is nonzero, we wish the condition to be relative to that
register, if possible.  Therefore, do not canonicalize the condition
further.  */
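/* Examples of rule (4) above (illustrative): (LE x 4) is rewritten as
   (LT x 5), (GE x 4) as (GT x 3), (LEU x 4) as (LTU x 5) and (GEU x 4)
   as (GTU x 3), so callers only ever see the strict comparison
   codes.  */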
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
			rtx want_reg)
9172 int reverse_code = 0;
9173 enum machine_mode mode;
9175 code = GET_CODE (cond);
9176 mode = GET_MODE (cond);
9177 op0 = XEXP (cond, 0);
9178 op1 = XEXP (cond, 1);
9181 code = reversed_comparison_code (cond, insn);
9182 if (code == UNKNOWN)
9188 /* If we are comparing a register with zero, see if the register is set
9189 in the previous insn to a COMPARE or a comparison operation. Perform
the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
in cse.c will do.  */
9193 while (GET_RTX_CLASS (code) == '<'
9194 && op1 == CONST0_RTX (GET_MODE (op0))
9197 /* Set nonzero when we find something of interest. */
/* If comparison with cc0, import actual comparison from compare
   insn.  */
9205 if ((prev = prev_nonnote_insn (prev)) == 0
9206 || GET_CODE (prev) != INSN
9207 || (set = single_set (prev)) == 0
9208 || SET_DEST (set) != cc0_rtx)
9211 op0 = SET_SRC (set);
9212 op1 = CONST0_RTX (GET_MODE (op0));
9218 /* If this is a COMPARE, pick up the two things being compared. */
9219 if (GET_CODE (op0) == COMPARE)
9221 op1 = XEXP (op0, 1);
9222 op0 = XEXP (op0, 0);
9225 else if (GET_CODE (op0) != REG)
9228 /* Go back to the previous insn. Stop if it is not an INSN. We also
9229 stop if it isn't a single set or if it has a REG_INC note because
9230 we don't want to bother dealing with it. */
9232 if ((prev = prev_nonnote_insn (prev)) == 0
9233 || GET_CODE (prev) != INSN
9234 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9237 set = set_of (op0, prev);
9240 && (GET_CODE (set) != SET
9241 || !rtx_equal_p (SET_DEST (set), op0)))
/* If this is setting OP0, get what it sets it to if it looks
   relevant.  */
9248 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9249 #ifdef FLOAT_STORE_FLAG_VALUE
9250 REAL_VALUE_TYPE fsfv;
9253 /* ??? We may not combine comparisons done in a CCmode with
9254 comparisons not done in a CCmode. This is to aid targets
9255 like Alpha that have an IEEE compliant EQ instruction, and
9256 a non-IEEE compliant BEQ instruction. The use of CCmode is
9257 actually artificial, simply to prevent the combination, but
9258 should not affect other platforms.
9260 However, we must allow VOIDmode comparisons to match either
9261 CCmode or non-CCmode comparison, because some ports have
9262 modeless comparisons inside branch patterns.
9264 ??? This mode check should perhaps look more like the mode check
9265 in simplify_comparison in combine. */
9267 if ((GET_CODE (SET_SRC (set)) == COMPARE
9270 && GET_MODE_CLASS (inner_mode) == MODE_INT
9271 && (GET_MODE_BITSIZE (inner_mode)
9272 <= HOST_BITS_PER_WIDE_INT)
9273 && (STORE_FLAG_VALUE
9274 & ((HOST_WIDE_INT) 1
9275 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9276 #ifdef FLOAT_STORE_FLAG_VALUE
9278 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9279 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9280 REAL_VALUE_NEGATIVE (fsfv)))
9283 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9284 && (((GET_MODE_CLASS (mode) == MODE_CC)
9285 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9286 || mode == VOIDmode || inner_mode == VOIDmode))
9288 else if (((code == EQ
9290 && (GET_MODE_BITSIZE (inner_mode)
9291 <= HOST_BITS_PER_WIDE_INT)
9292 && GET_MODE_CLASS (inner_mode) == MODE_INT
9293 && (STORE_FLAG_VALUE
9294 & ((HOST_WIDE_INT) 1
9295 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9296 #ifdef FLOAT_STORE_FLAG_VALUE
9298 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9299 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9300 REAL_VALUE_NEGATIVE (fsfv)))
9303 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9304 && (((GET_MODE_CLASS (mode) == MODE_CC)
9305 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9306 || mode == VOIDmode || inner_mode == VOIDmode))
9316 else if (reg_set_p (op0, prev))
9317 /* If this sets OP0, but not directly, we have to give up. */
9322 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9323 code = GET_CODE (x);
9326 code = reversed_comparison_code (x, prev);
9327 if (code == UNKNOWN)
9332 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9338 /* If constant is first, put it last. */
9339 if (CONSTANT_P (op0))
9340 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9342 /* If OP0 is the result of a comparison, we weren't able to find what
9343 was really being compared, so fail. */
9344 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9347 /* Canonicalize any ordered comparison with integers involving equality
if we can do computations in the relevant mode and we do not
overflow.  */
9351 if (GET_CODE (op1) == CONST_INT
9352 && GET_MODE (op0) != VOIDmode
9353 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9355 HOST_WIDE_INT const_val = INTVAL (op1);
9356 unsigned HOST_WIDE_INT uconst_val = const_val;
9357 unsigned HOST_WIDE_INT max_val
9358 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9363 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9364 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9367 /* When cross-compiling, const_val might be sign-extended from
9368 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9370 if ((HOST_WIDE_INT) (const_val & max_val)
9371 != (((HOST_WIDE_INT) 1
9372 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9373 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9377 if (uconst_val < max_val)
9378 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9382 if (uconst_val != 0)
9383 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9391 /* Never return CC0; return zero instead. */
9395 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9398 /* Given a jump insn JUMP, return the condition that will cause it to branch
9399 to its JUMP_LABEL. If the condition cannot be understood, or is an
9400 inequality floating-point comparison which needs to be reversed, 0 will
9403 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9404 insn used in locating the condition was found. If a replacement test
9405 of the condition is desired, it should be placed in front of that
9406 insn and we will be sure that the inputs are still valid. */
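/* Illustrative example: for a jump whose pattern is

	(set (pc) (if_then_else (eq r1 r2) (pc) (label_ref L)))

   the branch to L is taken when the condition is false, so
   get_condition returns the reversed condition (ne r1 r2).  */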
9409 get_condition (rtx jump, rtx *earliest)
9415 /* If this is not a standard conditional jump, we can't parse it. */
9416 if (GET_CODE (jump) != JUMP_INSN
9417 || ! any_condjump_p (jump))
9419 set = pc_set (jump);
9421 cond = XEXP (SET_SRC (set), 0);
/* If this branches to JUMP_LABEL when the condition is false, reverse
   the condition.  */
reverse
9426 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9427 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9429 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9432 /* Similar to above routine, except that we also put an invariant last
9433 unless both operands are invariants. */
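/* Example (illustrative): for "while (n > i)" with n loop-invariant
   and i the biv, get_condition yields (GT n i); since only the first
   operand is invariant, this routine swaps it to (LT i n) so that the
   invariant ends up last.  */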
9436 get_condition_for_loop (const struct loop *loop, rtx x)
9438 rtx comparison = get_condition (x, (rtx*) 0);
if (comparison == 0
    || ! loop_invariant_p (loop, XEXP (comparison, 0))
    || loop_invariant_p (loop, XEXP (comparison, 1)))
  return comparison;
9445 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9446 XEXP (comparison, 1), XEXP (comparison, 0));
9449 /* Scan the function and determine whether it has indirect (computed) jumps.
9451 This is taken mostly from flow.c; similar code exists elsewhere
9452 in the compiler. It may be useful to put this into rtlanal.c. */
9454 indirect_jump_in_function_p (rtx start)
9458 for (insn = start; insn; insn = NEXT_INSN (insn))
9459 if (computed_jump_p (insn))
9465 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9466 documentation for LOOP_MEMS for the definition of `appropriate'.
9467 This function is called from prescan_loop via for_each_rtx. */
9470 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9472 struct loop_info *loop_info = data;
9479 switch (GET_CODE (m))
9485 /* We're not interested in MEMs that are only clobbered. */
9489 /* We're not interested in the MEM associated with a
9490 CONST_DOUBLE, so there's no need to traverse into this. */
9494 /* We're not interested in any MEMs that only appear in notes. */
9498 /* This is not a MEM. */
9502 /* See if we've already seen this MEM. */
9503 for (i = 0; i < loop_info->mems_idx; ++i)
9504 if (rtx_equal_p (m, loop_info->mems[i].mem))
9506 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9507 /* The modes of the two memory accesses are different. If
9508 this happens, something tricky is going on, and we just
9509 don't optimize accesses to this MEM. */
9510 loop_info->mems[i].optimize = 0;
9515 /* Resize the array, if necessary. */
9516 if (loop_info->mems_idx == loop_info->mems_allocated)
9518 if (loop_info->mems_allocated != 0)
9519 loop_info->mems_allocated *= 2;
9521 loop_info->mems_allocated = 32;
9523 loop_info->mems = (loop_mem_info *)
9524 xrealloc (loop_info->mems,
9525 loop_info->mems_allocated * sizeof (loop_mem_info));
9528 /* Actually insert the MEM. */
9529 loop_info->mems[loop_info->mems_idx].mem = m;
9530 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9531 because we can't put it in a register. We still store it in the
9532 table, though, so that if we see the same address later, but in a
9533 non-BLK mode, we'll not think we can optimize it at that point. */
9534 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9535 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9536 ++loop_info->mems_idx;
9542 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9544 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9545 register that is modified by an insn between FROM and TO. If the
9546 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9547 more, stop incrementing it, to avoid overflow.
9549 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9550 register I is used, if it is only used once. Otherwise, it is set
9551 to 0 (for no uses) or const0_rtx for more than one use. This
9552 parameter may be zero, in which case this processing is not done.
9554 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9555 optimize register I. */
9558 loop_regs_scan (const struct loop *loop, int extra_size)
9560 struct loop_regs *regs = LOOP_REGS (loop);
9562 /* last_set[n] is nonzero iff reg n has been set in the current
9563 basic block. In that case, it is the insn that last set reg n. */
9568 old_nregs = regs->num;
9569 regs->num = max_reg_num ();
9571 /* Grow the regs array if not allocated or too small. */
9572 if (regs->num >= regs->size)
9574 regs->size = regs->num + extra_size;
9576 regs->array = (struct loop_reg *)
9577 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9579 /* Zero the new elements. */
9580 memset (regs->array + old_nregs, 0,
9581 (regs->size - old_nregs) * sizeof (*regs->array));
9584 /* Clear previously scanned fields but do not clear n_times_set. */
9585 for (i = 0; i < old_nregs; i++)
9587 regs->array[i].set_in_loop = 0;
9588 regs->array[i].may_not_optimize = 0;
9589 regs->array[i].single_usage = NULL_RTX;
9592 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9594 /* Scan the loop, recording register usage. */
9595 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9596 insn = NEXT_INSN (insn))
9600 /* Record registers that have exactly one use. */
9601 find_single_use_in_loop (regs, insn, PATTERN (insn));
9603 /* Include uses in REG_EQUAL notes. */
9604 if (REG_NOTES (insn))
9605 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9607 if (GET_CODE (PATTERN (insn)) == SET
9608 || GET_CODE (PATTERN (insn)) == CLOBBER)
9609 count_one_set (regs, insn, PATTERN (insn), last_set);
9610 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9613 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9614 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9619 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9620 memset (last_set, 0, regs->num * sizeof (rtx));
9622 /* Invalidate all registers used for function argument passing.
9623 We check rtx_varies_p for the same reason as below, to allow
9624 optimizing PIC calculations. */
9625 if (GET_CODE (insn) == CALL_INSN)
9628 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9630 link = XEXP (link, 1))
9634 if (GET_CODE (op = XEXP (link, 0)) == USE
9635 && GET_CODE (reg = XEXP (op, 0)) == REG
9636 && rtx_varies_p (reg, 1))
9637 regs->array[REGNO (reg)].may_not_optimize = 1;
9642 /* Invalidate all hard registers clobbered by calls. With one exception:
9643 a call-clobbered PIC register is still function-invariant for our
9644 purposes, since we can hoist any PIC calculations out of the loop.
9645 Thus the call to rtx_varies_p. */
9646 if (LOOP_INFO (loop)->has_call)
9647 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9648 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9649 && rtx_varies_p (regno_reg_rtx[i], 1))
9651 regs->array[i].may_not_optimize = 1;
9652 regs->array[i].set_in_loop = 1;
9655 #ifdef AVOID_CCMODE_COPIES
9656 /* Don't try to move insns which set CC registers if we should not
9657 create CCmode register copies. */
9658 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9659 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9660 regs->array[i].may_not_optimize = 1;
9663 /* Set regs->array[I].n_times_set for the new registers. */
9664 for (i = old_nregs; i < regs->num; i++)
9665 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9670 /* Returns the number of real INSNs in the LOOP. */
9673 count_insns_in_loop (const struct loop *loop)
9678 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9679 insn = NEXT_INSN (insn))
9686 /* Move MEMs into registers for the duration of the loop. */
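/* Illustrative example (not from the original sources): in

	for (i = 0; i < n; i++)
	  *x += a[i];

   if the address of *x is loop-invariant and *x is not aliased by
   anything else accessed in the loop, the MEM *x is replaced by a
   shadow pseudo R: R is loaded from *x (or from a known constant or
   register equivalent) before the loop, used throughout the body, and
   stored back to *x after the loop.  */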
9689 load_mems (const struct loop *loop)
9691 struct loop_info *loop_info = LOOP_INFO (loop);
9692 struct loop_regs *regs = LOOP_REGS (loop);
9693 int maybe_never = 0;
9695 rtx p, prev_ebb_head;
9696 rtx label = NULL_RTX;
9698 /* Nonzero if the next instruction may never be executed. */
9699 int next_maybe_never = 0;
9700 unsigned int last_max_reg = max_reg_num ();
9702 if (loop_info->mems_idx == 0)
9705 /* We cannot use next_label here because it skips over normal insns. */
9706 end_label = next_nonnote_insn (loop->end);
9707 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9708 end_label = NULL_RTX;
9710 /* Check to see if it's possible that some instructions in the loop are
9711 never executed. Also check if there is a goto out of the loop other
9712 than right after the end of the loop. */
9713 for (p = next_insn_in_loop (loop, loop->scan_start);
9715 p = next_insn_in_loop (loop, p))
9717 if (GET_CODE (p) == CODE_LABEL)
9719 else if (GET_CODE (p) == JUMP_INSN
9720 /* If we enter the loop in the middle, and scan
9721 around to the beginning, don't set maybe_never
9722 for that. This must be an unconditional jump,
9723 otherwise the code at the top of the loop might
9724 never be executed. Unconditional jumps are
followed by a barrier and then the loop end.  */
9726 && ! (GET_CODE (p) == JUMP_INSN
9727 && JUMP_LABEL (p) == loop->top
9728 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9729 && any_uncondjump_p (p)))
9731 /* If this is a jump outside of the loop but not right
9732 after the end of the loop, we would have to emit new fixup
9733 sequences for each such label. */
9734 if (/* If we can't tell where control might go when this
JUMP_INSN is executed, we must be conservative.  */
! JUMP_LABEL (p)
9737 || (JUMP_LABEL (p) != end_label
9738 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9739 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9740 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9743 if (!any_condjump_p (p))
9744 /* Something complicated. */
9747 /* If there are any more instructions in the loop, they
9748 might not be reached. */
9749 next_maybe_never = 1;
9751 else if (next_maybe_never)
9755 /* Find start of the extended basic block that enters the loop. */
9756 for (p = loop->start;
9757 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
/* Build table of mems that get set to constant values before the
   loop.  */
9766 for (; p != loop->start; p = NEXT_INSN (p))
9767 cselib_process_insn (p);
9769 /* Actually move the MEMs. */
9770 for (i = 0; i < loop_info->mems_idx; ++i)
9772 regset_head load_copies;
9773 regset_head store_copies;
9776 rtx mem = loop_info->mems[i].mem;
9779 if (MEM_VOLATILE_P (mem)
9780 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9781 /* There's no telling whether or not MEM is modified. */
9782 loop_info->mems[i].optimize = 0;
9784 /* Go through the MEMs written to in the loop to see if this
9785 one is aliased by one of them. */
9786 mem_list_entry = loop_info->store_mems;
9787 while (mem_list_entry)
9789 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9791 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9794 /* MEM is indeed aliased by this store. */
9795 loop_info->mems[i].optimize = 0;
9798 mem_list_entry = XEXP (mem_list_entry, 1);
9801 if (flag_float_store && written
9802 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9803 loop_info->mems[i].optimize = 0;
9805 /* If this MEM is written to, we must be sure that there
9806 are no reads from another MEM that aliases this one. */
9807 if (loop_info->mems[i].optimize && written)
9811 for (j = 0; j < loop_info->mems_idx; ++j)
9815 else if (true_dependence (mem,
9817 loop_info->mems[j].mem,
9820 /* It's not safe to hoist loop_info->mems[i] out of
9821 the loop because writes to it might not be
9822 seen by reads from loop_info->mems[j]. */
9823 loop_info->mems[i].optimize = 0;
9829 if (maybe_never && may_trap_p (mem))
9830 /* We can't access the MEM outside the loop; it might
9831 cause a trap that wouldn't have happened otherwise. */
9832 loop_info->mems[i].optimize = 0;
9834 if (!loop_info->mems[i].optimize)
9835 /* We thought we were going to lift this MEM out of the
9836 loop, but later discovered that we could not. */
9839 INIT_REG_SET (&load_copies);
9840 INIT_REG_SET (&store_copies);
9842 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9843 order to keep scan_loop from moving stores to this MEM
9844 out of the loop just because this REG is neither a
9845 user-variable nor used in the loop test. */
9846 reg = gen_reg_rtx (GET_MODE (mem));
9847 REG_USERVAR_P (reg) = 1;
9848 loop_info->mems[i].reg = reg;
9850 /* Now, replace all references to the MEM with the
9851 corresponding pseudos. */
9853 for (p = next_insn_in_loop (loop, loop->scan_start);
9855 p = next_insn_in_loop (loop, p))
9861 set = single_set (p);
9863 /* See if this copies the mem into a register that isn't
9864 modified afterwards. We'll try to do copy propagation
9865 a little further on. */
9867 /* @@@ This test is _way_ too conservative. */
9869 && GET_CODE (SET_DEST (set)) == REG
9870 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9871 && REGNO (SET_DEST (set)) < last_max_reg
9872 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9873 && rtx_equal_p (SET_SRC (set), mem))
9874 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9876 /* See if this copies the mem from a register that isn't
9877 modified afterwards. We'll try to remove the
9878 redundant copy later on by doing a little register
9879 renaming and copy propagation. This will help
9880 to untangle things for the BIV detection code. */
9883 && GET_CODE (SET_SRC (set)) == REG
9884 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9885 && REGNO (SET_SRC (set)) < last_max_reg
9886 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9887 && rtx_equal_p (SET_DEST (set), mem))
9888 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9890 /* If this is a call which uses / clobbers this memory
9891 location, we must not change the interface here. */
9892 if (GET_CODE (p) == CALL_INSN
9893 && reg_mentioned_p (loop_info->mems[i].mem,
9894 CALL_INSN_FUNCTION_USAGE (p)))
9897 loop_info->mems[i].optimize = 0;
9901 /* Replace the memory reference with the shadow register. */
9902 replace_loop_mems (p, loop_info->mems[i].mem,
9903 loop_info->mems[i].reg, written);
9906 if (GET_CODE (p) == CODE_LABEL
9907 || GET_CODE (p) == JUMP_INSN)
9911 if (! loop_info->mems[i].optimize)
9912 ; /* We found we couldn't do the replacement, so do nothing. */
9913 else if (! apply_change_group ())
9914 /* We couldn't replace all occurrences of the MEM. */
9915 loop_info->mems[i].optimize = 0;
9918 /* Load the memory immediately before LOOP->START, which is
9919 the NOTE_LOOP_BEG. */
9920 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9924 struct elt_loc_list *const_equiv = 0;
9928 struct elt_loc_list *equiv;
9929 struct elt_loc_list *best_equiv = 0;
9930 for (equiv = e->locs; equiv; equiv = equiv->next)
9932 if (CONSTANT_P (equiv->loc))
9933 const_equiv = equiv;
9934 else if (GET_CODE (equiv->loc) == REG
9935 /* Extending hard register lifetimes causes crash
on SRC targets.  Doing so on non-SRC targets is probably
not a good idea either, since we most probably have a
pseudo-register equivalence as well.  */
9940 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9943 /* Use the constant equivalence if that is cheap enough. */
9945 best_equiv = const_equiv;
9946 else if (const_equiv
9947 && (rtx_cost (const_equiv->loc, SET)
9948 <= rtx_cost (best_equiv->loc, SET)))
9950 best_equiv = const_equiv;
9954 /* If best_equiv is nonzero, we know that MEM is set to a
9955 constant or register before the loop. We will use this
9956 knowledge to initialize the shadow register with that
9957 constant or reg rather than by loading from MEM. */
9959 best = copy_rtx (best_equiv->loc);
9962 set = gen_move_insn (reg, best);
9963 set = loop_insn_hoist (loop, set);
9966 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9967 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9969 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9975 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9979 if (label == NULL_RTX)
9981 label = gen_label_rtx ();
9982 emit_label_after (label, loop->end);
9985 /* Store the memory immediately after END, which is
9986 the NOTE_LOOP_END. */
9987 set = gen_move_insn (copy_rtx (mem), reg);
9988 loop_insn_emit_after (loop, 0, label, set);
9991 if (loop_dump_stream)
9993 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9994 REGNO (reg), (written ? "r/w" : "r/o"));
9995 print_rtl (loop_dump_stream, mem);
9996 fputc ('\n', loop_dump_stream);
9999 /* Attempt a bit of copy propagation. This helps untangle the
data flow, and enables {basic,general}_induction_var to find
more bivs/givs.  */
10002 EXECUTE_IF_SET_IN_REG_SET
10003 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10005 try_copy_prop (loop, reg, j);
10007 CLEAR_REG_SET (&load_copies);
10009 EXECUTE_IF_SET_IN_REG_SET
10010 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10012 try_swap_copy_prop (loop, reg, j);
10014 CLEAR_REG_SET (&store_copies);
10018 if (label != NULL_RTX && end_label != NULL_RTX)
10020 /* Now, we need to replace all references to the previous exit
10021 label with the new one. */
10022 replace_label_data rr;
10025 rr.update_label_nuses = true;
10027 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10029 for_each_rtx (&p, replace_label, &rr);
10036 /* For communication between note_reg_stored and its caller. */
10037 struct note_reg_stored_arg
10043 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10044 is equal to ARG. */
10046 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
10048 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10053 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10054 There must be exactly one insn that sets this pseudo; it will be
10055 deleted if all replacements succeed and we can prove that the register
10056 is not used after the loop. */
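/* Illustrative example: after load_mems gives a MEM the shadow
   register S, an in-loop copy "r = S" derived from the original
   "r = mem" becomes redundant; propagating S into every use of r
   allows the copy insn to be deleted as dead.  */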
10059 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10061 /* This is the reg that we are copying from. */
10062 rtx reg_rtx = regno_reg_rtx[regno];
10065 /* These help keep track of whether we replaced all uses of the reg. */
10066 int replaced_last = 0;
10067 int store_is_first = 0;
10069 for (insn = next_insn_in_loop (loop, loop->scan_start);
10071 insn = next_insn_in_loop (loop, insn))
/* Only substitute within one extended basic block from the initializing
   insn.  */
10077 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10080 if (! INSN_P (insn))
10083 /* Is this the initializing insn? */
10084 set = single_set (insn);
10086 && GET_CODE (SET_DEST (set)) == REG
10087 && REGNO (SET_DEST (set)) == regno)
10093 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10094 store_is_first = 1;
10097 /* Only substitute after seeing the initializing insn. */
10098 if (init_insn && insn != init_insn)
10100 struct note_reg_stored_arg arg;
10102 replace_loop_regs (insn, reg_rtx, replacement);
10103 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10106 /* Stop replacing when REPLACEMENT is modified. */
10107 arg.reg = replacement;
10109 note_stores (PATTERN (insn), note_reg_stored, &arg);
10112 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
/* It is possible that we've turned a previously valid REG_EQUAL note
   into an invalid one: we have changed REGNO into REPLACEMENT, and
   unlike REGNO, REPLACEMENT is modified in the loop, so the note may
   no longer hold.  */
10117 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10118 remove_note (insn, note);
10125 if (apply_change_group ())
10127 if (loop_dump_stream)
10128 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10129 if (store_is_first && replaced_last)
10134 /* Assume we're just deleting INIT_INSN. */
10136 /* Look for REG_RETVAL note. If we're deleting the end of
10137 the libcall sequence, the whole sequence can go. */
10138 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10139 /* If we found a REG_RETVAL note, find the first instruction
10140 in the sequence. */
10142 first = XEXP (retval_note, 0);
10144 /* Delete the instructions. */
10145 loop_delete_insns (first, init_insn);
10147 if (loop_dump_stream)
10148 fprintf (loop_dump_stream, ".\n");
10152 /* Replace all the instructions from FIRST up to and including LAST
10153 with NOTE_INSN_DELETED notes. */
10156 loop_delete_insns (rtx first, rtx last)
10160 if (loop_dump_stream)
10161 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10163 delete_insn (first);
/* If this was the LAST instruction we're supposed to delete,
   we're done.  */
10170 first = NEXT_INSN (first);
10174 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10175 loop LOOP if the order of the sets of these registers can be
10176 swapped. There must be exactly one insn within the loop that sets
10177 this pseudo followed immediately by a move insn that sets
10178 REPLACEMENT with REGNO. */
10180 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10181 unsigned int regno)
10184 rtx set = NULL_RTX;
10185 unsigned int new_regno;
10187 new_regno = REGNO (replacement);
10189 for (insn = next_insn_in_loop (loop, loop->scan_start);
10191 insn = next_insn_in_loop (loop, insn))
/* Search for the insn that copies REGNO to NEW_REGNO.  */
10195 && (set = single_set (insn))
10196 && GET_CODE (SET_DEST (set)) == REG
10197 && REGNO (SET_DEST (set)) == new_regno
10198 && GET_CODE (SET_SRC (set)) == REG
10199 && REGNO (SET_SRC (set)) == regno)
10203 if (insn != NULL_RTX)
10208 /* Some DEF-USE info would come in handy here to make this
10209 function more general. For now, just check the previous insn
10210 which is the most likely candidate for setting REGNO. */
10212 prev_insn = PREV_INSN (insn);
10215 && (prev_set = single_set (prev_insn))
10216 && GET_CODE (SET_DEST (prev_set)) == REG
10217 && REGNO (SET_DEST (prev_set)) == regno)
10220 (set (reg regno) (expr))
10221 (set (reg new_regno) (reg regno))
10223 so try converting this to:
10224 (set (reg new_regno) (expr))
10225 (set (reg regno) (reg new_regno))
10227 The former construct is often generated when a global
10228 variable used for an induction variable is shadowed by a
10229 register (NEW_REGNO). The latter construct improves the
10230 chances of GIV replacement and BIV elimination. */
10232 validate_change (prev_insn, &SET_DEST (prev_set),
10234 validate_change (insn, &SET_DEST (set),
10236 validate_change (insn, &SET_SRC (set),
10239 if (apply_change_group ())
10241 if (loop_dump_stream)
10242 fprintf (loop_dump_stream,
10243 " Swapped set of reg %d at %d with reg %d at %d.\n",
10244 regno, INSN_UID (insn),
10245 new_regno, INSN_UID (prev_insn));
10247 /* Update first use of REGNO. */
10248 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10249 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10251 /* Now perform copy propagation to hopefully
10252 remove all uses of REGNO within the loop. */
10253 try_copy_prop (loop, replacement, regno);
10259 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10262 find_mem_in_note_1 (rtx *x, void *data)
10264 if (*x != NULL_RTX && GET_CODE (*x) == MEM)
10266 rtx *res = (rtx *) data;
10273 /* Returns the first MEM found in NOTE by depth-first search. */
10276 find_mem_in_note (rtx note)
if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10283 /* Replace MEM with its associated pseudo register. This function is
10284 called from load_mems via for_each_rtx. DATA is actually a pointer
10285 to a structure describing the instruction currently being scanned
10286 and the MEM we are currently replacing. */
10289 replace_loop_mem (rtx *mem, void *data)
10291 loop_replace_args *args = (loop_replace_args *) data;
10297 switch (GET_CODE (m))
10303 /* We're not interested in the MEM associated with a
10304 CONST_DOUBLE, so there's no need to traverse into one. */
10308 /* This is not a MEM. */
10312 if (!rtx_equal_p (args->match, m))
10313 /* This is not the MEM we are currently replacing. */
10316 /* Actually replace the MEM. */
10317 validate_change (args->insn, mem, args->replacement, 1);
10323 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10325 loop_replace_args args;
10329 args.replacement = reg;
10331 for_each_rtx (&insn, replace_loop_mem, &args);
10333 /* If we hoist a mem write out of the loop, then REG_EQUAL
10334 notes referring to the mem are no longer valid. */
for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10342 if (REG_NOTE_KIND (note) == REG_EQUAL
10343 && (sub = find_mem_in_note (note))
10344 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10346 /* Remove the note. */
10347 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10354 /* Replace one register with another. Called through for_each_rtx; PX points
10355 to the rtx being scanned. DATA is actually a pointer to
10356 a structure of arguments. */
10359 replace_loop_reg (rtx *px, void *data)
10362 loop_replace_args *args = (loop_replace_args *) data;
10367 if (x == args->match)
10368 validate_change (args->insn, px, args->replacement, 1);
10374 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10376 loop_replace_args args;
10380 args.replacement = replacement;
10382 for_each_rtx (&insn, replace_loop_reg, &args);
10385 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10386 (ignored in the interim). */
10389 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10390 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10393 return emit_insn_after (pattern, where_insn);
10397 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10398 in basic block WHERE_BB (ignored in the interim) within the loop
10399 otherwise hoist PATTERN into the loop pre-header. */
10402 loop_insn_emit_before (const struct loop *loop,
10403 basic_block where_bb ATTRIBUTE_UNUSED,
10404 rtx where_insn, rtx pattern)
10407 return loop_insn_hoist (loop, pattern);
10408 return emit_insn_before (pattern, where_insn);
10412 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10413 WHERE_BB (ignored in the interim) within the loop. */
10416 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10417 basic_block where_bb ATTRIBUTE_UNUSED,
10418 rtx where_insn, rtx pattern)
10420 return emit_call_insn_before (pattern, where_insn);
10424 /* Hoist insn for PATTERN into the loop pre-header. */
10427 loop_insn_hoist (const struct loop *loop, rtx pattern)
10429 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10433 /* Hoist call insn for PATTERN into the loop pre-header. */
10436 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10438 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10442 /* Sink insn for PATTERN after the loop end. */
10445 loop_insn_sink (const struct loop *loop, rtx pattern)
10447 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10450 /* bl->final_value can be either general_operand or PLUS of general_operand
10451 and constant. Emit sequence of instructions to load it into REG. */
10453 gen_load_of_final_value (rtx reg, rtx final_value)
10457 final_value = force_operand (final_value, reg);
10458 if (final_value != reg)
10459 emit_move_insn (reg, final_value);
10460 seq = get_insns ();
/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */

rtx
loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  return loop_insn_sink (loop, pattern);
}

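/* For instance, in a source loop with an extra exit such as

     for (i = 0; i < n; i++)
       if (p[i] == key)
	 break;

   an insn sunk after the normal exit is not reached along the `break'
   edge, so with a nonzero exit_count the pattern is hoisted instead.
   (An illustrative C fragment, not code from this file.)  */
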
/* Dump information about all induction-variable classes of LOOP to
   FILE at verbosity level VERBOSE.  */
void
loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
{
  struct iv_class *bl;
  int iv_num = 0;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;
  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}

/* Dump the induction-variable class BL to FILE.  */
void
loop_iv_class_dump (const struct iv_class *bl, FILE *file,
		    int verbose ATTRIBUTE_UNUSED)
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
	   bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
	       i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
	print_simple_rtl (file, v->mem);
      else
	print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}

/* Dump the basic induction variable V to FILE.  */
void
loop_biv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  fprintf (file, "Biv %d: insn %d",
	   REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}

/* Dump the general induction variable V to FILE.  */
void
loop_giv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
	     REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
	     INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
	   REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
	   v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    switch (GET_CODE (v->ext_dependent))
      {
      case SIGN_EXTEND:
	fprintf (file, " ext se");
	break;
      case ZERO_EXTEND:
	fprintf (file, " ext ze");
	break;
      case TRUNCATE:
	fprintf (file, " ext tr");
	break;
      default:
	break;
      }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}

/* The debug_* entry points dump to stderr and are meant to be called
   from a debugger.  */

void
debug_ivs (const struct loop *loop)
{
  loop_ivs_dump (loop, stderr, 1);
}

void
debug_iv_class (const struct iv_class *bl)
{
  loop_iv_class_dump (bl, stderr, 1);
}

void
debug_biv (const struct induction *v)
{
  loop_biv_dump (v, stderr, 1);
}

void
debug_giv (const struct induction *v)
{
  loop_giv_dump (v, stderr, 1);
}

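/* For example, from gdb one might evaluate

     (gdb) call debug_iv_class (bl)

   to inspect an IV class while stopped inside this pass (assuming a
   live `bl' variable in the current frame).  */
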
#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (GET_CODE (INSN) == NOTE \
	   ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
	   : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)

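/* Illustration: for an INSN that is a NOTE,

     LOOP_BLOCK_NUM (insn)

   expands to LOOP_BLOCK_NUM_1 (next_nonnote_insn (insn)), so notes are
   attributed to the block of the following real insn; both macros map
   a null INSN to -1, which lets the dump code below print its fields
   unconditionally.  */
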
/* Dump auxiliary information about LOOP to FILE; used as a callback
   for flow_loop_dump.  */
static void
loop_dump_aux (const struct loop *loop, FILE *file,
	       int verbose ATTRIBUTE_UNUSED)
{
  rtx label;

  if (! loop || ! file)
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (loop->first->head)
      || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
      || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
	 != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
	     LOOP_INSN_UID (PREV_INSN (loop->first->head)));
  if (! NEXT_INSN (loop->last->end)
      || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
      || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
	 != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
	     LOOP_INSN_UID (NEXT_INSN (loop->last->end)));
10715 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10716 LOOP_BLOCK_NUM (loop->start),
10717 LOOP_INSN_UID (loop->start),
10718 LOOP_BLOCK_NUM (loop->cont),
10719 LOOP_INSN_UID (loop->cont),
10720 LOOP_BLOCK_NUM (loop->cont),
10721 LOOP_INSN_UID (loop->cont),
10722 LOOP_BLOCK_NUM (loop->vtop),
10723 LOOP_INSN_UID (loop->vtop),
10724 LOOP_BLOCK_NUM (loop->end),
10725 LOOP_INSN_UID (loop->end));
10726 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10727 LOOP_BLOCK_NUM (loop->top),
10728 LOOP_INSN_UID (loop->top),
10729 LOOP_BLOCK_NUM (loop->scan_start),
10730 LOOP_INSN_UID (loop->scan_start));
  fprintf (file, ";; exit_count %d", loop->exit_count);
  if (loop->exit_count)
    {
      fputs (", labels:", file);
      for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
	fprintf (file, " %d ",
		 LOOP_INSN_UID (XEXP (label, 0)));
    }
  fputs ("\n", file);
  /* This can happen when a marked loop appears as two nested loops,
     say from while (a || b) {}.  The inner loop won't match
     the loop markers but the outer one will.  */
  if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
    fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (const struct loop *loop)
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (const struct loops *loops)
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}