/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];
/* Bit flags that specify the machine subtype we are compiling for.
   Bits are tested using macros TARGET_... defined in the tm.h file
   and set by `-m...' switches.  Must be defined in rtlanal.c.  */

int target_flags;
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (x == pic_offset_table_rtx)
        return 0;
#endif
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;

        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
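/* Usage sketch (illustrative, not part of the original file): a pass that
   wants to recompute an address elsewhere in the same function could check

     if (!rtx_unstable_p (XEXP (mem, 0)))
       ... the address evaluates to the same value at any point
           of this function ...

   Stability here is per-function; cross-execution variability is what
   rtx_varies_p below answers.  */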
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && for_alias
#endif
          )
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;

        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
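/* Usage sketch (illustrative, not part of the original file): before
   hoisting a load above a guarding condition, a pass would typically check

     if (!rtx_addr_can_trap_p (XEXP (mem, 0)))
       ... the load may be executed speculatively ...

   e.g. (plus (reg/f frame_pointer) (const_int 8)) is safe, while a
   dereference through an arbitrary pseudo is not.  */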
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (CONST_INT_P (XEXP (x, 1)))
        return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
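/* Illustrative sketch (not part of the original file): a simplification
   pass can use this predicate to fold a null-pointer comparison, e.g.

     if (nonzero_address_p (addr))
       return (cmp_code == NE) ? const_true_rtx : const0_rtx;

   where ADDR and CMP_CODE are hypothetical names for the address being
   compared against zero and the comparison code.  */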
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
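/* Example (illustrative): for X = (const (plus (symbol_ref "a") (const_int 4))),
   get_integer_term returns 4 and get_related_value returns (symbol_ref "a").
   cse.c uses this pair to relate constants that differ only by an offset.  */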
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
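/* Example (illustrative):

     rtx base, offset;
     split_const (x, &base, &offset);

   leaves BASE = (symbol_ref "a") and OFFSET = (const_int 12) for
   X = (const (plus (symbol_ref "a") (const_int 12))), and BASE = X,
   OFFSET = (const_int 0) for anything with no apparent offset.  */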
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }

  return count;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    case CONST_INT:
    case CONST_VECTOR:
    case CONST_DOUBLE:
    case CONST_FIXED:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
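/* Usage sketch (illustrative): combine-like passes ask whether an insn
   reads or writes a given register anywhere in its pattern:

     if (reg_mentioned_p (target_reg, PATTERN (insn)))
       ... INSN mentions TARGET_REG somewhere ...

   REG may also be, say, a (symbol_ref ...); an rtx_equal_p match is then
   looked for instead.  */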
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
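/* Usage sketch (illustrative): the two predicates above are the usual
   legality check before moving an insn across a range:

     if (!reg_used_between_p (reg, from, to)
         && !reg_set_between_p (reg, from, to))
       ... REG is neither read nor written in the range, so an insn
           using REG may be moved across it ...  */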
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (set)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }

  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
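/* Example (illustrative): (set (reg:SI 60) (reg:SI 60)) is a no-op, and so
   is the pair of matching subregs

     (set (subreg:SI (reg:DI 61) 4) (subreg:SI (reg:DI 61) 4))

   while a copy between two volatile MEMs never is, because side_effects_p
   rejects the destination.  */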
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }

  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case.)  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
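/* Usage sketch (illustrative): collecting every hard register written by an
   insn.  The callback and the HARD_REG_SET variable are hypothetical names.

     static void
     record_hard_reg_stores (rtx dest, const_rtx pat ATTRIBUTE_UNUSED,
                             void *data)
     {
       HARD_REG_SET *pset = (HARD_REG_SET *) data;
       if (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
         add_to_hard_reg_set (pset, GET_MODE (dest), REGNO (dest));
     }

     CLEAR_HARD_REG_SET (set);
     note_stores (PATTERN (insn), record_hard_reg_stores, &set);  */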
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
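/* Usage sketch (illustrative): testing whether a register dies in an insn,
   and retrieving a known-equivalent value, both go through the note
   accessors above:

     if (find_regno_note (insn, REG_DEAD, REGNO (reg)))
       ... REG's value is not needed after INSN ...

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the value the SET_DEST is known to equal ...  */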
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return NULL_RTX;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   should be moved or combined across them.  This includes only volatile
   asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
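/* Illustrative contrast between the three predicates above, for a few
   expressions (VMEM stands for a volatile MEM):

     expression                volatile_insn_p  volatile_refs_p  side_effects_p
     (plus (reg) (const_int))  0                0                0
     VMEM                      0                1                1
     (post_inc (reg))          0                0                1
     (unspec_volatile ...)     1                1                1  */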
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);

  switch (code)
    {
      /* Handle these cases quickly.  */
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return targetm.unspec_may_trap_p (x, flags);

    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE (x) ? INTVAL (MEM_SIZE (x)) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
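/* Usage sketch (illustrative): if-conversion style transformations use
   these predicates to decide whether an expression may be evaluated
   unconditionally, e.g.

     if (!side_effects_p (src) && !may_trap_p (src))
       ... SRC may be speculatively evaluated ...  */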
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  /* The following prevents looping when we change a MEM inside a
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
        {
          rtx new_c, new_l;
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);
        }
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
        {
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
        }
      return 0;
    }

  return 0;
}
/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

static int
rtx_referenced_p_1 (rtx *body, void *x)
{
  rtx y = (rtx) x;

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);
}

/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (JUMP_P (insn)
      && (label = JUMP_LABEL (insn)) != NULL_RTX
      && (table = next_active_insn (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = table;
      return true;
    }
  return false;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;

  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              has_use_labelref = 1;

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
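/* Example (illustrative): a jump through a function pointer or a
   switch-table address loaded into a register, such as

     (set (pc) (reg:SI 100))

   is a computed jump, while a casesi expansion keeps a
   (use (label_ref ...)) in its PARALLEL and is therefore excluded.  */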
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */
static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}
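/* Usage sketch (illustrative; the callback name is hypothetical):

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x && MEM_P (*x))
         ++*(int *) data;
       return 0;
     }

     int n_mems = 0;
     for_each_rtx (&PATTERN (insn), count_mems_1, &n_mems);

   Returning -1 from the callback skips sub-expressions; any other
   nonzero value aborts the walk and is returned by for_each_rtx.  */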
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap the operands of a commutative
   operation in order to canonicalize the expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
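/* For example, when canonicalizing (plus (const_int 4) (reg)), the
   constant has lower precedence than the register, so a caller such as

     if (swap_commutative_operands_p (op0, op1))
       {
         rtx tem = op0;
         op0 = op1;
         op1 = tem;
       }

   would swap the operands and produce (plus (reg) (const_int 4)).  */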
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC: case POST_INC: case PRE_DEC: case POST_DEC:
    case PRE_MODIFY: case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
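/* As a worked example: on a little-endian target (WORDS_BIG_ENDIAN and
   BYTES_BIG_ENDIAN both 0), the high word of a DImode register,

     (subreg:SI (reg:DI ...) 4)

   has subreg_lsb () == 32, since word 1 contributes 32 bits and the
   byte offset within the word is 0.  On a fully big-endian target the
   same SUBREG_BYTE of 4 names the low-order word, so subreg_lsb ()
   is 0 there.  */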
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */

void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
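/* For instance, asking for the second SImode half of a DImode value
   that lives in a register pair on a typical 32-bit little-endian
   target,

     subreg_regno_offset (xregno, DImode, 4, SImode)

   yields 1 (the second register of the pair), while
   subreg_offset_representable_p answers whether such a decomposition
   is valid at all for the given hard register.  */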
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && (xregno == FRAME_POINTER_REGNUM
          || xregno == HARD_FRAME_POINTER_REGNUM))
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how float/complex arguments are passed
     on 32-bit SPARC and should be fixed.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
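/* A caller can therefore fold a hard-register subreg such as

     (subreg:SI (reg:DI 10) 4)

   to (reg:SI 11) when simplify_subreg_regno (10, DImode, 4, SImode)
   returns 11, and must leave the subreg alone when it returns -1.
   (The register numbers here are purely illustrative.)  */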
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg expression with register
   REGNO refers to.  This is a variant of subreg_nregs in which the regno
   can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for the first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller must either ensure that we will find all sets (in
         case code has not been optimized yet), or take care of possible
         labels by setting BOUNDARY to the preceding CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and the
   preceding call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
                        GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   The SPEED parameter specifies whether costs optimized for speed or
   for size should be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    default:
      total = COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2
                              + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus the cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, speed);

  return total;
}
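/* Passes typically compare the costs of two candidate expressions,
   e.g. something along the lines of

     if (rtx_cost (new_rtx, SET, optimize_insn_for_speed_p ())
         < rtx_cost (old_rtx, SET, optimize_insn_for_speed_p ()))
       ...prefer new_rtx...

   where the outer code (here SET) gives the target hook context for
   its cost adjustments.  (new_rtx and old_rtx stand in for the
   caller's own expressions.)  */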
/* Return the cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or
   for size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for the cost of various unusual addresses, such as
     operands of a push instruction.  It is not worthwhile to complicate
     writing of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, bool speed)
{
  return rtx_cost (x, MEM, speed);
}
/* Return the bits of X that are known to be nonzero when X is
   interpreted in MODE.  */

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

/* Return the number of high-order bits of X that are known to be copies
   of the sign bit when X is interpreted in MODE.  */

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
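/* For example, for a value masked down to a byte,

     nonzero_bits ((and:SI (reg:SI X) (const_int 255)), SImode)

   is 0xff (assuming nothing further is known about the register), and
   for a sign extension,

     num_sign_bit_copies ((sign_extend:DI (reg:SI X)), DImode)

   is at least 33, since the 32 new high bits and the original SImode
   sign bit all agree.  (X is an arbitrary pseudo here.)  */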
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  unsigned int mode_width = GET_MODE_BITSIZE (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_BITSIZE (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
          && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
        return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
#endif

      return INTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_BITSIZE (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_BITSIZE (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (inner_nz
              & (((HOST_WIDE_INT) 1
                  << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0 =
          cached_nonzero_bits (XEXP (x, 0), mode,
                               known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0 =
          cached_nonzero_bits (XEXP (x, 0), mode,
                               known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1 =
          cached_nonzero_bits (XEXP (x, 1), mode,
                               known_x, known_mode, known_ret);
        int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        HOST_WIDE_INT op0_maybe_minusp
          = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
        HOST_WIDE_INT op1_maybe_minusp
          = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (! op0_maybe_minusp && ! op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (! op0_maybe_minusp && ! op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
        /* If pointers extend unsigned and this is an addition or subtraction
           to a pointer in Pmode, all the bits above ptr_mode are known to be
           zero.  */
        /* As we do not know which address space the pointer is referring to,
           we can do this only if the target does not support different pointer
           or address modes depending on the address space.  */
        if (target_default_pointer_address_modes_p ()
            && POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
            && (code == PLUS || code == MINUS)
            && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
          nonzero &= GET_MODE_MASK (ptr_mode);
#endif
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
              <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
               ? (((nonzero
                    & (((unsigned HOST_WIDE_INT) 1
                        << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
                  != 0))
               : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_SIZE (GET_MODE (x))
                  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_BITSIZE (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero =
            cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true =
          cached_nonzero_bits (XEXP (x, 1), mode,
                               known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
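/* As a worked instance of the arithmetic rules above: if nonzero_bits
   says each addend fits in 8 bits (nz0 == nz1 == 0xff), then
   width0 == width1 == 8 and the PLUS case computes
   result_width == MAX (8, 8) + 1 == 9, so the sum is known to fit in
   the low 9 bits and nonzero is masked down to 0x1ff.  */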
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode,
                            const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
                      enum machine_mode known_mode,
                      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_BITSIZE (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
                                         known_x, known_mode, known_ret);
      return MAX (1,
                  num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
         register and MODE is wider than the mode of X, we can say nothing
         at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
         than a word and loads of that size don't sign extend, we can say
         nothing about the high order bits.  */
      if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
          && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
          )
        return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && mode == Pmode && REG_POINTER (x))
        return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
#endif

      {
        unsigned int copies_for_hook = 1, copies = 1;
        rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
                                                         known_mode, known_ret,
                                                         &copies_for_hook);

        if (new_rtx)
          copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
                                               known_mode, known_ret);

        if (copies > 1 || copies_for_hook > 1)
          return MAX (copies, copies_for_hook);

        /* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
        return MAX (1, ((int) bitwidth
                        - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = INTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
         and we are looking at it in a wider mode, we know that at least the
         high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                             known_x, known_mode, known_ret);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
                      num0);
        }

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
                                             known_x, known_mode, known_ret);
          return MAX (1, (num0
                          - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
                                   - bitwidth)));
        }

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
         affect the entire register, just look inside.  Note that we are
         passing MODE to the recursive call, so the number of sign bit copies
         will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
         reload for the inner part, it may be loaded from the stack, and
         then we lose all sign bit copies that existed before the store
         to the stack.  */

      if ((GET_MODE_SIZE (GET_MODE (x))
           > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
          && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
          && MEM_P (SUBREG_REG (x)))
        return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                           known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
        return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
              + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                            known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                         known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
         of sign bit copies, we can just subtract that amount from the
         number.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
          num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                             known_x, known_mode, known_ret);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
         is known to be positive, the number of sign bit copies is the
         same as that of the input.  Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
        return bitwidth;

      if (num0 > 1
          && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
        num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
         the other term, we are guaranteed to have at least that many
         high-order zero bits.  */
      if (code == AND
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && !(INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
        return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
        return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
         if we are subtracting 1 from a positive number, there will not
         be such a carry.  Furthermore, if the positive number is known to
         be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
          && bitwidth <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero = nonzero_bits (XEXP (x, 0), mode);
          if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
            return (nonzero == 1 || nonzero == 0 ? bitwidth
                    : bitwidth - floor_log2 (nonzero) - 1);
        }

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If pointers extend signed and this is an addition or subtraction
         to a pointer in Pmode, all the bits above ptr_mode are known to be
         sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && (code == PLUS || code == MINUS)
          && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
        result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
                             - GET_MODE_BITSIZE (ptr_mode) + 1),
                      result);
#endif
      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
         bits of both terms.  However, unless one of the terms is known
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (((nonzero_bits (XEXP (x, 0), mode)
                    & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
                  && ((nonzero_bits (XEXP (x, 1), mode)
                       & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
        result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
         has (or just might have) the high bit set, we know nothing about
         the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
                & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth
          || INTVAL (XEXP (x, 1)) >= GET_MODE_BITSIZE (GET_MODE (x)))
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_BITSIZE (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
         ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
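/* For instance, an arithmetic right shift by a constant adds copies:
   if (reg:SI X) is known to have 5 sign-bit copies, then

     num_sign_bit_copies ((ashiftrt:SI (reg:SI X) (const_int 3)), SImode)

   is at least 8, per the ASHIFTRT case above.  */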
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (set)
                return 0;
              set = x;
            }
        }
      if (!set)
        return 0;
    }
  else
    return 0;

  cost = rtx_cost (SET_SRC (set), SET, speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
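/* Callers use this, for instance, to decide whether a transformation
   is profitable:

     if (insn_rtx_cost (PATTERN (insn), optimize_bb_for_speed_p (bb))
         > COSTS_N_INSNS (max_cost))
       ...give up...

   (max_cost and bb stand in for the caller's own context.)  */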
4659 /* Given an insn INSN and condition COND, return the condition in a
4660 canonical form to simplify testing by callers. Specifically:
4662 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4663 (2) Both operands will be machine operands; (cc0) will have been replaced.
4664 (3) If an operand is a constant, it will be the second operand.
4665 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4666 for GE, GEU, and LEU.
4668 If the condition cannot be understood, or is an inequality floating-point
4669 comparison which needs to be reversed, 0 will be returned.
4671 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
4673 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4674 insn used in locating the condition was found. If a replacement test
4675 of the condition is desired, it should be placed in front of that
4676 insn and we will be sure that the inputs are still valid.
4678 If WANT_REG is nonzero, we wish the condition to be relative to that
4679 register, if possible. Therefore, do not canonicalize the condition
4680 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4681 to be a compare to a CC mode register.
4683 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4687 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4688 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4695 int reverse_code = 0;
4696 enum machine_mode mode;
4697 basic_block bb = BLOCK_FOR_INSN (insn);
4699 code = GET_CODE (cond);
4700 mode = GET_MODE (cond);
4701 op0 = XEXP (cond, 0);
4702 op1 = XEXP (cond, 1);
4705 code = reversed_comparison_code (cond, insn);
4706 if (code == UNKNOWN)
4712 /* If we are comparing a register with zero, see if the register is set
4713 in the previous insn to a COMPARE or a comparison operation. Perform
4714 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4717 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4718 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4719 && op1 == CONST0_RTX (GET_MODE (op0))
4722 /* Set nonzero when we find something of interest. */
4726 /* If comparison with cc0, import actual comparison from compare
4730 if ((prev = prev_nonnote_insn (prev)) == 0
4731 || !NONJUMP_INSN_P (prev)
4732 || (set = single_set (prev)) == 0
4733 || SET_DEST (set) != cc0_rtx)
4736 op0 = SET_SRC (set);
4737 op1 = CONST0_RTX (GET_MODE (op0));
4743 /* If this is a COMPARE, pick up the two things being compared. */
4744 if (GET_CODE (op0) == COMPARE)
4746 op1 = XEXP (op0, 1);
4747 op0 = XEXP (op0, 0);
4750 else if (!REG_P (op0))
4753 /* Go back to the previous insn. Stop if it is not an INSN. We also
4754 stop if it isn't a single set or if it has a REG_INC note because
4755 we don't want to bother dealing with it. */
4758 prev = prev_nonnote_insn (prev);
4759 while (prev && DEBUG_INSN_P (prev));
4762 || !NONJUMP_INSN_P (prev)
4763 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4764 /* In cfglayout mode, there do not have to be labels at the
4765 beginning of a block, or jumps at the end, so the previous
4766 conditions would not stop us when we reach bb boundary. */
4767 || BLOCK_FOR_INSN (prev) != bb)
4770 set = set_of (op0, prev);
4773 && (GET_CODE (set) != SET
4774 || !rtx_equal_p (SET_DEST (set), op0)))
4777 /* If this is setting OP0, get what it sets it to if it looks
4781 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4782 #ifdef FLOAT_STORE_FLAG_VALUE
4783 REAL_VALUE_TYPE fsfv;
4786 /* ??? We may not combine comparisons done in a CCmode with
4787 comparisons not done in a CCmode. This is to aid targets
4788 like Alpha that have an IEEE compliant EQ instruction, and
4789 a non-IEEE compliant BEQ instruction. The use of CCmode is
4790 actually artificial, simply to prevent the combination, but
4791 should not affect other platforms.
4793 However, we must allow VOIDmode comparisons to match either
4794 CCmode or non-CCmode comparison, because some ports have
4795 modeless comparisons inside branch patterns.
4797 ??? This mode check should perhaps look more like the mode check
4798 in simplify_comparison in combine. */
4800 if ((GET_CODE (SET_SRC (set)) == COMPARE
4803 && GET_MODE_CLASS (inner_mode) == MODE_INT
4804 && (GET_MODE_BITSIZE (inner_mode)
4805 <= HOST_BITS_PER_WIDE_INT)
4806 && (STORE_FLAG_VALUE
4807 & ((HOST_WIDE_INT) 1
4808 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4809 #ifdef FLOAT_STORE_FLAG_VALUE
4811 && SCALAR_FLOAT_MODE_P (inner_mode)
4812 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4813 REAL_VALUE_NEGATIVE (fsfv)))
4816 && COMPARISON_P (SET_SRC (set))))
4817 && (((GET_MODE_CLASS (mode) == MODE_CC)
4818 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4819 || mode == VOIDmode || inner_mode == VOIDmode))
4821 else if (((code == EQ
4823 && (GET_MODE_BITSIZE (inner_mode)
4824 <= HOST_BITS_PER_WIDE_INT)
4825 && GET_MODE_CLASS (inner_mode) == MODE_INT
4826 && (STORE_FLAG_VALUE
4827 & ((HOST_WIDE_INT) 1
4828 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4829 #ifdef FLOAT_STORE_FLAG_VALUE
4831 && SCALAR_FLOAT_MODE_P (inner_mode)
4832 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4833 REAL_VALUE_NEGATIVE (fsfv)))
4836 && COMPARISON_P (SET_SRC (set))
4837 && (((GET_MODE_CLASS (mode) == MODE_CC)
4838 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4839 || mode == VOIDmode || inner_mode == VOIDmode))
4849 else if (reg_set_p (op0, prev))
4850 /* If this sets OP0, but not directly, we have to give up. */
4855 /* If the caller is expecting the condition to be valid at INSN,
4856 make sure X doesn't change before INSN. */
4857 if (valid_at_insn_p)
4858 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4860 if (COMPARISON_P (x))
4861 code = GET_CODE (x);
4864 code = reversed_comparison_code (x, prev);
4865 if (code == UNKNOWN)
4870 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4876 /* If constant is first, put it last. */
4877 if (CONSTANT_P (op0))
4878 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4880 /* If OP0 is the result of a comparison, we weren't able to find what
4881 was really being compared, so fail. */
4883 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4886 /* Canonicalize any ordered comparison with integers involving equality
4887 if we can do computations in the relevant mode and we do not
4890 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4891 && CONST_INT_P (op1)
4892 && GET_MODE (op0) != VOIDmode
4893 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4895 HOST_WIDE_INT const_val = INTVAL (op1);
4896 unsigned HOST_WIDE_INT uconst_val = const_val;
4897 unsigned HOST_WIDE_INT max_val
4898 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
        case GE:
          if ((HOST_WIDE_INT) (const_val & max_val)
              != (((HOST_WIDE_INT) 1
                   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

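  /* For example, with a 32-bit op0 the rewrites above turn the
     non-strict test "x <= 4" into the strict "x < 5" and the unsigned
     "x >=u 3" into "x >u 2"; each case first checks that adjusting the
     constant by one cannot wrap around in the mode of op0.  (The
     constants are purely illustrative.)  */
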
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;
  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}

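/* As an illustrative sketch (the variable names here are hypothetical,
   not from this file), a pass examining a conditional jump INSN might do:

     rtx cond = get_condition (insn, NULL, 0, 1);
     if (cond != 0)
       ...  the branch is taken exactly when COND, e.g.
            (lt (reg:SI 100) (const_int 5)), evaluates true at INSN ...

   With EARLIEST null, the returned condition is valid at the jump
   itself, so the caller need not track an earlier insn.  */
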
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        enum machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            enum machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
          }
      }
}

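/* For example, on a hypothetical target whose mode_rep_extended hook
   returns SIGN_EXTEND for (SImode, DImode), the loops above record
   num_sign_bit_copies_in_rep[DImode][SImode] == 32: every bit of a
   DImode value outside the low SImode part must be a copy of the sign
   bit before the truncation to SImode can be considered free.  */
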
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}

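/* Continuing the example above: a DImode X for which
   num_sign_bit_copies reports at least 33 copies (the 32 required bits
   plus the sign bit itself) already satisfies SImode's representation,
   so truncated_to_mode (SImode, X) answers true and no explicit
   truncation insn is needed.  */
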
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}

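/* For instance, PLUS has the format string "ee", so its first rtx
   operand is at position 0, while CONST_INT ("w", a single wide int)
   has no rtx operands at all and gets -1.  for_each_rtx consults this
   table to jump straight to the first sub-rtx instead of rescanning
   the format string on every visit.  */
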
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}

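/* E.g. for a MEM whose address is a SYMBOL_REF into the constant pool,
   avoid_constant_pool_reference hands back the pooled constant itself,
   so a reference to a pooled floating-point literal answers true here,
   while anything that does not resolve to a CONST_DOUBLE answers
   false.  */
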
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}

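/* A mask of low-order bits is exactly a value of the form 2**N - 1, so
   M + 1 is a power of two and exact_log2 recovers N directly.  With
   purely illustrative inputs:

     low_bitmask_len (SImode, 0xff) == 8    since 0xff + 1 == 1 << 8
     low_bitmask_len (SImode, 0x06) == -1   since 0x07 is not a power of two  */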