diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index d61df8902bf..53e6d83d1b8 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -1,6 +1,7 @@
-/* Analyze RTL for C-Compiler
+/* Analyze RTL for GNU compiler.
    Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
+   Foundation, Inc.
 
 This file is part of GCC.
 
@@ -16,8 +17,8 @@ for more details.
 
 You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA.  */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.  */
 
 #include "config.h"
@@ -33,14 +34,14 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "output.h"
 #include "tm_p.h"
 #include "flags.h"
-#include "basic-block.h"
 #include "real.h"
 #include "regs.h"
 #include "function.h"
 
 /* Forward declarations */
-static int global_reg_mentioned_p_1 (rtx *, void *);
 static void set_of_1 (rtx, rtx, void *);
+static bool covers_regno_p (rtx, unsigned int);
+static bool covers_regno_no_parallel_p (rtx, unsigned int);
 static int rtx_referenced_p_1 (rtx *, void *);
 static int computed_jump_p_1 (rtx);
 static void parms_set (rtx, rtx, void *);
@@ -57,11 +58,31 @@ static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
 static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
                                           enum machine_mode, unsigned int);
 
+/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
+   -1 if a code has no such operand.  */
+static int non_rtx_starting_operands[NUM_RTX_CODE];
+
 /* Bit flags that specify the machine subtype we are compiling for.
    Bits are tested using macros TARGET_... defined in the tm.h file
    and set by `-m...' switches.  Must be defined in rtlanal.c.  */
 
 int target_flags;
+
+/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
+   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
+   SIGN_EXTEND then while narrowing we also have to enforce the
+   representation and sign-extend the value to mode DESTINATION_REP.
+
+   If the value is already sign-extended to DESTINATION_REP mode we
+   can just switch to DESTINATION mode on it.  For each pair of
+   integral modes SOURCE and DESTINATION, when truncating from SOURCE
+   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
+   contains the number of high-order bits in SOURCE that have to be
+   copies of the sign-bit so that we can do this mode-switch to
+   DESTINATION.  */
+
+static unsigned int
+num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
 
 /* Return 1 if the value of X is unstable
    (would be different at a different point in the program).
@@ -218,10 +239,13 @@ rtx_varies_p (rtx x, int for_alias)
   return 0;
 }
 
-/* Return 0 if the use of X as an address in a MEM can cause a trap.  */
+/* Return nonzero if the use of X as an address in a MEM can cause a trap.
+   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
+   whether nonzero is returned for unaligned memory accesses on strict
+   alignment machines.
*/ -int -rtx_addr_can_trap_p (rtx x) +static int +rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems) { enum rtx_code code = GET_CODE (x); @@ -247,27 +271,54 @@ rtx_addr_can_trap_p (rtx x) return 1; case CONST: - return rtx_addr_can_trap_p (XEXP (x, 0)); + return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems); case PLUS: - /* An address is assumed not to trap if it is an address that can't - trap plus a constant integer or it is the pic register plus a - constant. */ - return ! ((! rtx_addr_can_trap_p (XEXP (x, 0)) - && GET_CODE (XEXP (x, 1)) == CONST_INT) - || (XEXP (x, 0) == pic_offset_table_rtx - && CONSTANT_P (XEXP (x, 1)))); + /* An address is assumed not to trap if: + - it is an address that can't trap plus a constant integer, + with the proper remainder modulo the mode size if we are + considering unaligned memory references. */ + if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems) + && GET_CODE (XEXP (x, 1)) == CONST_INT) + { + HOST_WIDE_INT offset; + + if (!STRICT_ALIGNMENT + || !unaligned_mems + || GET_MODE_SIZE (mode) == 0) + return 0; + + offset = INTVAL (XEXP (x, 1)); + +#ifdef SPARC_STACK_BOUNDARY_HACK + /* ??? The SPARC port may claim a STACK_BOUNDARY higher than + the real alignment of %sp. However, when it does this, the + alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */ + if (SPARC_STACK_BOUNDARY_HACK + && (XEXP (x, 0) == stack_pointer_rtx + || XEXP (x, 0) == hard_frame_pointer_rtx)) + offset -= STACK_POINTER_OFFSET; +#endif + + return offset % GET_MODE_SIZE (mode) != 0; + } + + /* - or it is the pic register plus a constant. */ + if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1))) + return 0; + + return 1; case LO_SUM: case PRE_MODIFY: - return rtx_addr_can_trap_p (XEXP (x, 1)); + return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems); case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case POST_MODIFY: - return rtx_addr_can_trap_p (XEXP (x, 0)); + return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems); default: break; @@ -277,6 +328,14 @@ rtx_addr_can_trap_p (rtx x) return 1; } +/* Return nonzero if the use of X as an address in a MEM can cause a trap. */ + +int +rtx_addr_can_trap_p (rtx x) +{ + return rtx_addr_can_trap_p_1 (x, VOIDmode, false); +} + /* Return true if X is an address that is known to not be zero. */ bool @@ -432,78 +491,6 @@ get_related_value (rtx x) return 0; } -/* A subroutine of global_reg_mentioned_p, returns 1 if *LOC mentions - a global register. */ - -static int -global_reg_mentioned_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) -{ - int regno; - rtx x = *loc; - - if (! x) - return 0; - - switch (GET_CODE (x)) - { - case SUBREG: - if (REG_P (SUBREG_REG (x))) - { - if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER - && global_regs[subreg_regno (x)]) - return 1; - return 0; - } - break; - - case REG: - regno = REGNO (x); - if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) - return 1; - return 0; - - case SCRATCH: - case PC: - case CC0: - case CONST_INT: - case CONST_DOUBLE: - case CONST: - case LABEL_REF: - return 0; - - case CALL: - /* A non-constant call might use a global register. */ - return 1; - - default: - break; - } - - return 0; -} - -/* Returns nonzero if X mentions a global register. */ - -int -global_reg_mentioned_p (rtx x) -{ - if (INSN_P (x)) - { - if (CALL_P (x)) - { - if (! 
CONST_OR_PURE_CALL_P (x)) - return 1; - x = CALL_INSN_FUNCTION_USAGE (x); - if (x == 0) - return 0; - } - else - x = PATTERN (x); - } - - return for_each_rtx (&x, global_reg_mentioned_p_1, NULL); -} - /* Return the number of places FIND appears within X. If COUNT_DEST is zero, we do not count occurrences inside the destination of a SET. */ @@ -661,9 +648,7 @@ reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn) for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (reg_overlap_mentioned_p (reg, PATTERN (insn)) - || (CALL_P (insn) - && (find_reg_fusage (insn, USE, reg) - || find_reg_fusage (insn, CLOBBER, reg))))) + || (CALL_P (insn) && find_reg_fusage (insn, USE, reg)))) return 1; return 0; } @@ -783,51 +768,6 @@ reg_set_p (rtx reg, rtx insn) } /* Similar to reg_set_between_p, but check all registers in X. Return 0 - only if none of them are modified between START and END. Do not - consider non-registers one way or the other. */ - -int -regs_set_between_p (rtx x, rtx start, rtx end) -{ - enum rtx_code code = GET_CODE (x); - const char *fmt; - int i, j; - - switch (code) - { - case CONST_INT: - case CONST_DOUBLE: - case CONST_VECTOR: - case CONST: - case SYMBOL_REF: - case LABEL_REF: - case PC: - case CC0: - return 0; - - case REG: - return reg_set_between_p (x, start, end); - - default: - break; - } - - fmt = GET_RTX_FORMAT (code); - for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) - { - if (fmt[i] == 'e' && regs_set_between_p (XEXP (x, i), start, end)) - return 1; - - else if (fmt[i] == 'E') - for (j = XVECLEN (x, i) - 1; j >= 0; j--) - if (regs_set_between_p (XVECEXP (x, i, j), start, end)) - return 1; - } - - return 0; -} - -/* Similar to reg_set_between_p, but check all registers in X. Return 0 only if none of them are modified between START and END. Return 1 if X contains a MEM; this routine does usememory aliasing. */ @@ -857,10 +797,10 @@ modified_between_p (rtx x, rtx start, rtx end) return 1; case MEM: - if (MEM_READONLY_P (x)) - return 0; if (modified_between_p (XEXP (x, 0), start, end)) return 1; + if (MEM_READONLY_P (x)) + return 0; for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn)) if (memory_modified_in_insn_p (x, insn)) return 1; @@ -915,10 +855,10 @@ modified_in_p (rtx x, rtx insn) return 1; case MEM: - if (MEM_READONLY_P (x)) - return 0; if (modified_in_p (XEXP (x, 0), insn)) return 1; + if (MEM_READONLY_P (x)) + return 0; if (memory_modified_in_insn_p (x, insn)) return 1; return 0; @@ -1074,8 +1014,7 @@ set_noop_p (rtx set) if (MEM_P (dst) && MEM_P (src)) return rtx_equal_p (dst, src) && !side_effects_p (dst); - if (GET_CODE (dst) == SIGN_EXTRACT - || GET_CODE (dst) == ZERO_EXTRACT) + if (GET_CODE (dst) == ZERO_EXTRACT) return rtx_equal_p (XEXP (dst, 0), src) && ! 
BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx && !side_effects_p (src); @@ -1350,8 +1289,18 @@ reg_overlap_mentioned_p (rtx x, rtx in) fmt = GET_RTX_FORMAT (GET_CODE (in)); for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--) - if (fmt[i] == 'e' && reg_overlap_mentioned_p (x, XEXP (in, i))) - return 1; + if (fmt[i] == 'e') + { + if (reg_overlap_mentioned_p (x, XEXP (in, i))) + return 1; + } + else if (fmt[i] == 'E') + { + int j; + for (j = XVECLEN (in, i) - 1; j >= 0; --j) + if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j))) + return 1; + } return 0; } @@ -1404,7 +1353,6 @@ note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data) && (!REG_P (SUBREG_REG (dest)) || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER)) || GET_CODE (dest) == ZERO_EXTRACT - || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); @@ -1513,8 +1461,8 @@ note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data) This will be true if X is (cc0) or if X is a register and X dies in INSN or because INSN entirely sets X. - "Entirely set" means set directly and not through a SUBREG, - ZERO_EXTRACT or SIGN_EXTRACT, so no trace of the old contents remains. + "Entirely set" means set directly and not through a SUBREG, or + ZERO_EXTRACT, so no trace of the old contents remains. Likewise, REG_INC does not count. REG may be a hard or pseudo reg. Renumbering is not taken into account, @@ -1549,13 +1497,64 @@ dead_or_set_p (rtx insn, rtx x) return 1; } +/* Return TRUE iff DEST is a register or subreg of a register and + doesn't change the number of words of the inner register, and any + part of the register is TEST_REGNO. */ + +static bool +covers_regno_no_parallel_p (rtx dest, unsigned int test_regno) +{ + unsigned int regno, endregno; + + if (GET_CODE (dest) == SUBREG + && (((GET_MODE_SIZE (GET_MODE (dest)) + + UNITS_PER_WORD - 1) / UNITS_PER_WORD) + == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) + + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) + dest = SUBREG_REG (dest); + + if (!REG_P (dest)) + return false; + + regno = REGNO (dest); + endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1 + : regno + hard_regno_nregs[regno][GET_MODE (dest)]); + return (test_regno >= regno && test_regno < endregno); +} + +/* Like covers_regno_no_parallel_p, but also handles PARALLELs where + any member matches the covers_regno_no_parallel_p criteria. */ + +static bool +covers_regno_p (rtx dest, unsigned int test_regno) +{ + if (GET_CODE (dest) == PARALLEL) + { + /* Some targets place small structures in registers for return + values of functions, and those registers are wrapped in + PARALLELs that we may see as the destination of a SET. */ + int i; + + for (i = XVECLEN (dest, 0) - 1; i >= 0; i--) + { + rtx inner = XEXP (XVECEXP (dest, 0, i), 0); + if (inner != NULL_RTX + && covers_regno_no_parallel_p (inner, test_regno)) + return true; + } + + return false; + } + else + return covers_regno_no_parallel_p (dest, test_regno); +} + /* Utility function for dead_or_set_p to check an individual register. Also called from flow.c. */ int dead_or_set_regno_p (rtx insn, unsigned int test_regno) { - unsigned int regno, endregno; rtx pattern; /* See if there is a death note for something that includes TEST_REGNO. 
*/ @@ -1572,28 +1571,7 @@ dead_or_set_regno_p (rtx insn, unsigned int test_regno) pattern = COND_EXEC_CODE (pattern); if (GET_CODE (pattern) == SET) - { - rtx dest = SET_DEST (pattern); - - /* A value is totally replaced if it is the destination or the - destination is a SUBREG of REGNO that does not change the number of - words in it. */ - if (GET_CODE (dest) == SUBREG - && (((GET_MODE_SIZE (GET_MODE (dest)) - + UNITS_PER_WORD - 1) / UNITS_PER_WORD) - == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) - dest = SUBREG_REG (dest); - - if (!REG_P (dest)) - return 0; - - regno = REGNO (dest); - endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1 - : regno + hard_regno_nregs[regno][GET_MODE (dest)]); - - return (test_regno >= regno && test_regno < endregno); - } + return covers_regno_p (SET_DEST (pattern), test_regno); else if (GET_CODE (pattern) == PARALLEL) { int i; @@ -1605,27 +1583,9 @@ dead_or_set_regno_p (rtx insn, unsigned int test_regno) if (GET_CODE (body) == COND_EXEC) body = COND_EXEC_CODE (body); - if (GET_CODE (body) == SET || GET_CODE (body) == CLOBBER) - { - rtx dest = SET_DEST (body); - - if (GET_CODE (dest) == SUBREG - && (((GET_MODE_SIZE (GET_MODE (dest)) - + UNITS_PER_WORD - 1) / UNITS_PER_WORD) - == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) - dest = SUBREG_REG (dest); - - if (!REG_P (dest)) - continue; - - regno = REGNO (dest); - endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1 - : regno + hard_regno_nregs[regno][GET_MODE (dest)]); - - if (test_regno >= regno && test_regno < endregno) - return 1; - } + if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER) + && covers_regno_p (SET_DEST (body), test_regno)) + return 1; } } @@ -1640,6 +1600,8 @@ find_reg_note (rtx insn, enum reg_note kind, rtx datum) { rtx link; + gcc_assert (insn); + /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */ if (! INSN_P (insn)) return 0; @@ -2087,14 +2049,25 @@ side_effects_p (rtx x) return 0; } -/* Return nonzero if evaluating rtx X might cause a trap. */ +enum may_trap_p_flags +{ + MTP_UNALIGNED_MEMS = 1, + MTP_AFTER_MOVE = 2 +}; +/* Return nonzero if evaluating rtx X might cause a trap. + (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for + unaligned memory accesses on strict alignment machines. If + (FLAGS & AFTER_MOVE) is true, returns nonzero even in case the expression + cannot trap at its current location, but it might become trapping if moved + elsewhere. */ -int -may_trap_p (rtx x) +static int +may_trap_p_1 (rtx x, unsigned flags) { int i; enum rtx_code code; const char *fmt; + bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0; if (x == 0) return 0; @@ -2124,9 +2097,15 @@ may_trap_p (rtx x) /* Memory ref can trap unless it's a static var or a stack slot. */ case MEM: - if (MEM_NOTRAP_P (x)) + if (/* MEM_NOTRAP_P only relates to the actual position of the memory + reference; moving it out of condition might cause its address + become invalid. */ + !(flags & MTP_AFTER_MOVE) + && MEM_NOTRAP_P (x) + && (!STRICT_ALIGNMENT || !unaligned_mems)) return 0; - return rtx_addr_can_trap_p (XEXP (x, 0)); + return + rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems); /* Division by a non-constant might trap. */ case DIV: @@ -2135,11 +2114,9 @@ may_trap_p (rtx x) case UMOD: if (HONOR_SNANS (GET_MODE (x))) return 1; - if (! 
CONSTANT_P (XEXP (x, 1)) - || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT - && flag_trapping_math)) - return 1; - if (XEXP (x, 1) == const0_rtx) + if (SCALAR_FLOAT_MODE_P (GET_MODE (x))) + return flag_trapping_math; + if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx)) return 1; break; @@ -2188,12 +2165,13 @@ may_trap_p (rtx x) case NEG: case ABS: + case SUBREG: /* These operations don't trap even with floating point. */ break; default: /* Any floating arithmetic may trap. */ - if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT + if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math) return 1; } @@ -2203,19 +2181,82 @@ may_trap_p (rtx x) { if (fmt[i] == 'e') { - if (may_trap_p (XEXP (x, i))) + if (may_trap_p_1 (XEXP (x, i), flags)) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) - if (may_trap_p (XVECEXP (x, i, j))) + if (may_trap_p_1 (XVECEXP (x, i, j), flags)) return 1; } } return 0; } + +/* Return nonzero if evaluating rtx X might cause a trap. */ + +int +may_trap_p (rtx x) +{ + return may_trap_p_1 (x, 0); +} + +/* Return nonzero if evaluating rtx X might cause a trap, when the expression + is moved from its current location by some optimization. */ + +int +may_trap_after_code_motion_p (rtx x) +{ + return may_trap_p_1 (x, MTP_AFTER_MOVE); +} + +/* Same as above, but additionally return nonzero if evaluating rtx X might + cause a fault. We define a fault for the purpose of this function as a + erroneous execution condition that cannot be encountered during the normal + execution of a valid program; the typical example is an unaligned memory + access on a strict alignment machine. The compiler guarantees that it + doesn't generate code that will fault from a valid program, but this + guarantee doesn't mean anything for individual instructions. Consider + the following example: + + struct S { int d; union { char *cp; int *ip; }; }; + + int foo(struct S *s) + { + if (s->d == 1) + return *s->ip; + else + return *s->cp; + } + + on a strict alignment machine. In a valid program, foo will never be + invoked on a structure for which d is equal to 1 and the underlying + unique field of the union not aligned on a 4-byte boundary, but the + expression *s->ip might cause a fault if considered individually. + + At the RTL level, potentially problematic expressions will almost always + verify may_trap_p; for example, the above dereference can be emitted as + (mem:SI (reg:P)) and this expression is may_trap_p for a generic register. + However, suppose that foo is inlined in a caller that causes s->cp to + point to a local character variable and guarantees that s->d is not set + to 1; foo may have been effectively translated into pseudo-RTL as: + + if ((reg:SI) == 1) + (set (reg:SI) (mem:SI (%fp - 7))) + else + (set (reg:QI) (mem:QI (%fp - 7))) + + Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a + memory reference to a stack slot, but it will certainly cause a fault + on a strict alignment machine. */ + +int +may_trap_or_fault_p (rtx x) +{ + return may_trap_p_1 (x, MTP_UNALIGNED_MEMS); +} /* Return nonzero if X contains a comparison that is not either EQ or NE, i.e., an inequality. */ @@ -2346,128 +2387,28 @@ replace_rtx (rtx x, rtx from, rtx to) return x; } -/* Throughout the rtx X, replace many registers according to REG_MAP. - Return the replacement for X (which may be X with altered contents). - REG_MAP[R] is the replacement for register R, or 0 for don't replace. - NREGS is the length of REG_MAP; regs >= NREGS are not mapped. 
- - We only support REG_MAP entries of REG or SUBREG. Also, hard registers - should not be mapped to pseudos or vice versa since validate_change - is not called. - - If REPLACE_DEST is 1, replacements are also done in destinations; - otherwise, only sources are replaced. */ +/* Replace occurrences of the old label in *X with the new one. + DATA is a REPLACE_LABEL_DATA containing the old and new labels. */ -rtx -replace_regs (rtx x, rtx *reg_map, unsigned int nregs, int replace_dest) +int +replace_label (rtx *x, void *data) { - enum rtx_code code; - int i; - const char *fmt; + rtx l = *x; + rtx old_label = ((replace_label_data *) data)->r1; + rtx new_label = ((replace_label_data *) data)->r2; + bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses; - if (x == 0) - return x; + if (l == NULL_RTX) + return 0; - code = GET_CODE (x); - switch (code) + if (GET_CODE (l) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P (l)) { - case SCRATCH: - case PC: - case CC0: - case CONST_INT: - case CONST_DOUBLE: - case CONST_VECTOR: - case CONST: - case SYMBOL_REF: - case LABEL_REF: - return x; - - case REG: - /* Verify that the register has an entry before trying to access it. */ - if (REGNO (x) < nregs && reg_map[REGNO (x)] != 0) - { - /* SUBREGs can't be shared. Always return a copy to ensure that if - this replacement occurs more than once then each instance will - get distinct rtx. */ - if (GET_CODE (reg_map[REGNO (x)]) == SUBREG) - return copy_rtx (reg_map[REGNO (x)]); - return reg_map[REGNO (x)]; - } - return x; - - case SUBREG: - /* Prevent making nested SUBREGs. */ - if (REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) < nregs - && reg_map[REGNO (SUBREG_REG (x))] != 0 - && GET_CODE (reg_map[REGNO (SUBREG_REG (x))]) == SUBREG) + rtx c = get_pool_constant (l); + if (rtx_referenced_p (old_label, c)) { - rtx map_val = reg_map[REGNO (SUBREG_REG (x))]; - return simplify_gen_subreg (GET_MODE (x), map_val, - GET_MODE (SUBREG_REG (x)), - SUBREG_BYTE (x)); - } - break; - - case SET: - if (replace_dest) - SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0); - - else if (MEM_P (SET_DEST (x)) - || GET_CODE (SET_DEST (x)) == STRICT_LOW_PART) - /* Even if we are not to replace destinations, replace register if it - is CONTAINED in destination (destination is memory or - STRICT_LOW_PART). */ - XEXP (SET_DEST (x), 0) = replace_regs (XEXP (SET_DEST (x), 0), - reg_map, nregs, 0); - else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT) - /* Similarly, for ZERO_EXTRACT we replace all operands. */ - break; - - SET_SRC (x) = replace_regs (SET_SRC (x), reg_map, nregs, 0); - return x; - - default: - break; - } - - fmt = GET_RTX_FORMAT (code); - for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) - { - if (fmt[i] == 'e') - XEXP (x, i) = replace_regs (XEXP (x, i), reg_map, nregs, replace_dest); - else if (fmt[i] == 'E') - { - int j; - for (j = 0; j < XVECLEN (x, i); j++) - XVECEXP (x, i, j) = replace_regs (XVECEXP (x, i, j), reg_map, - nregs, replace_dest); - } - } - return x; -} - -/* Replace occurrences of the old label in *X with the new one. - DATA is a REPLACE_LABEL_DATA containing the old and new labels. 
*/ - -int -replace_label (rtx *x, void *data) -{ - rtx l = *x; - rtx old_label = ((replace_label_data *) data)->r1; - rtx new_label = ((replace_label_data *) data)->r2; - bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses; - - if (l == NULL_RTX) - return 0; - - if (GET_CODE (l) == SYMBOL_REF - && CONSTANT_POOL_ADDRESS_P (l)) - { - rtx c = get_pool_constant (l); - if (rtx_referenced_p (old_label, c)) - { - rtx new_c, new_l; - replace_label_data *d = (replace_label_data *) data; + rtx new_c, new_l; + replace_label_data *d = (replace_label_data *) data; /* Create a copy of constant C; replace the label inside but do not update LABEL_NUSES because uses in constant pool @@ -2658,6 +2599,82 @@ computed_jump_p (rtx insn) return 0; } +/* Optimized loop of for_each_rtx, trying to avoid useless recursive + calls. Processes the subexpressions of EXP and passes them to F. */ +static int +for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data) +{ + int result, i, j; + const char *format = GET_RTX_FORMAT (GET_CODE (exp)); + rtx *x; + + for (; format[n] != '\0'; n++) + { + switch (format[n]) + { + case 'e': + /* Call F on X. */ + x = &XEXP (exp, n); + result = (*f) (x, data); + if (result == -1) + /* Do not traverse sub-expressions. */ + continue; + else if (result != 0) + /* Stop the traversal. */ + return result; + + if (*x == NULL_RTX) + /* There are no sub-expressions. */ + continue; + + i = non_rtx_starting_operands[GET_CODE (*x)]; + if (i >= 0) + { + result = for_each_rtx_1 (*x, i, f, data); + if (result != 0) + return result; + } + break; + + case 'V': + case 'E': + if (XVEC (exp, n) == 0) + continue; + for (j = 0; j < XVECLEN (exp, n); ++j) + { + /* Call F on X. */ + x = &XVECEXP (exp, n, j); + result = (*f) (x, data); + if (result == -1) + /* Do not traverse sub-expressions. */ + continue; + else if (result != 0) + /* Stop the traversal. */ + return result; + + if (*x == NULL_RTX) + /* There are no sub-expressions. */ + continue; + + i = non_rtx_starting_operands[GET_CODE (*x)]; + if (i >= 0) + { + result = for_each_rtx_1 (*x, i, f, data); + if (result != 0) + return result; + } + } + break; + + default: + /* Nothing to do. */ + break; + } + } + + return 0; +} + /* Traverse X via depth-first search, calling F for each sub-expression (including X itself). F is also passed the DATA. If F returns -1, do not traverse sub-expressions, but continue @@ -2675,8 +2692,6 @@ int for_each_rtx (rtx *x, rtx_function f, void *data) { int result; - int length; - const char *format; int i; /* Call F on X. */ @@ -2692,43 +2707,14 @@ for_each_rtx (rtx *x, rtx_function f, void *data) /* There are no sub-expressions. */ return 0; - length = GET_RTX_LENGTH (GET_CODE (*x)); - format = GET_RTX_FORMAT (GET_CODE (*x)); - - for (i = 0; i < length; ++i) - { - switch (format[i]) - { - case 'e': - result = for_each_rtx (&XEXP (*x, i), f, data); - if (result != 0) - return result; - break; - - case 'V': - case 'E': - if (XVEC (*x, i) != 0) - { - int j; - for (j = 0; j < XVECLEN (*x, i); ++j) - { - result = for_each_rtx (&XVECEXP (*x, i, j), f, data); - if (result != 0) - return result; - } - } - break; - - default: - /* Nothing to do. */ - break; - } - - } + i = non_rtx_starting_operands[GET_CODE (*x)]; + if (i < 0) + return 0; - return 0; + return for_each_rtx_1 (*x, i, f, data); } + /* Searches X for any reference to REGNO, returning the rtx of the reference found if any. Otherwise, returns NULL_RTX. 
*/ @@ -2857,82 +2843,6 @@ auto_inc_p (rtx x) return 0; } -/* Return 1 if the sequence of instructions beginning with FROM and up - to and including TO is safe to move. If NEW_TO is non-NULL, and - the sequence is not already safe to move, but can be easily - extended to a sequence which is safe, then NEW_TO will point to the - end of the extended sequence. - - For now, this function only checks that the region contains whole - exception regions, but it could be extended to check additional - conditions as well. */ - -int -insns_safe_to_move_p (rtx from, rtx to, rtx *new_to) -{ - int eh_region_count = 0; - int past_to_p = 0; - rtx r = from; - - /* By default, assume the end of the region will be what was - suggested. */ - if (new_to) - *new_to = to; - - while (r) - { - if (NOTE_P (r)) - { - switch (NOTE_LINE_NUMBER (r)) - { - case NOTE_INSN_EH_REGION_BEG: - ++eh_region_count; - break; - - case NOTE_INSN_EH_REGION_END: - if (eh_region_count == 0) - /* This sequence of instructions contains the end of - an exception region, but not he beginning. Moving - it will cause chaos. */ - return 0; - - --eh_region_count; - break; - - default: - break; - } - } - else if (past_to_p) - /* If we've passed TO, and we see a non-note instruction, we - can't extend the sequence to a movable sequence. */ - return 0; - - if (r == to) - { - if (!new_to) - /* It's OK to move the sequence if there were matched sets of - exception region notes. */ - return eh_region_count == 0; - - past_to_p = 1; - } - - /* It's OK to move the sequence if there were matched sets of - exception region notes. */ - if (past_to_p && eh_region_count == 0) - { - *new_to = r; - return 1; - } - - /* Go to the next instruction. */ - r = NEXT_INSN (r); - } - - return 0; -} - /* Return nonzero if IN contains a piece of rtl that has the address LOC. */ int loc_mentioned_in_p (rtx *loc, rtx in) @@ -3020,13 +2930,37 @@ unsigned int subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, unsigned int offset, enum machine_mode ymode) { - int nregs_xmode, nregs_ymode; + int nregs_xmode, nregs_ymode, nregs_xmode_unit_int; int mode_multiple, nregs_multiple; int y_offset; + enum machine_mode xmode_unit, xmode_unit_int; gcc_assert (xregno < FIRST_PSEUDO_REGISTER); - nregs_xmode = hard_regno_nregs[xregno][xmode]; + if (GET_MODE_INNER (xmode) == VOIDmode) + xmode_unit = xmode; + else + xmode_unit = GET_MODE_INNER (xmode); + + if (FLOAT_MODE_P (xmode_unit)) + { + xmode_unit_int = int_mode_for_mode (xmode_unit); + if (xmode_unit_int == BLKmode) + /* It's probably bad to be here; a port should have an integer mode + that's the same size as anything of which it takes a SUBREG. */ + xmode_unit_int = xmode_unit; + } + else + xmode_unit_int = xmode_unit; + + nregs_xmode_unit_int = hard_regno_nregs[xregno][xmode_unit_int]; + + /* Adjust nregs_xmode to allow for 'holes'. */ + if (nregs_xmode_unit_int != hard_regno_nregs[xregno][xmode_unit]) + nregs_xmode = nregs_xmode_unit_int * GET_MODE_NUNITS (xmode); + else + nregs_xmode = hard_regno_nregs[xregno][xmode]; + nregs_ymode = hard_regno_nregs[xregno][ymode]; /* If this is a big endian paradoxical subreg, which uses more actual @@ -3041,7 +2975,7 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, if (offset == 0 || nregs_xmode == nregs_ymode) return 0; - /* size of ymode must not be greater than the size of xmode. */ + /* Size of ymode must not be greater than the size of xmode. 
*/ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); gcc_assert (mode_multiple != 0); @@ -3056,36 +2990,82 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, xmode - The mode of xregno. offset - The byte offset. ymode - The mode of a top level SUBREG (or what may become one). - RETURN - The regno offset which would be used. */ + RETURN - Whether the offset is representable. */ bool subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, unsigned int offset, enum machine_mode ymode) { - int nregs_xmode, nregs_ymode; + int nregs_xmode, nregs_ymode, nregs_xmode_unit, nregs_xmode_unit_int; int mode_multiple, nregs_multiple; int y_offset; + enum machine_mode xmode_unit, xmode_unit_int; gcc_assert (xregno < FIRST_PSEUDO_REGISTER); - nregs_xmode = hard_regno_nregs[xregno][xmode]; + if (GET_MODE_INNER (xmode) == VOIDmode) + xmode_unit = xmode; + else + xmode_unit = GET_MODE_INNER (xmode); + + if (FLOAT_MODE_P (xmode_unit)) + { + xmode_unit_int = int_mode_for_mode (xmode_unit); + if (xmode_unit_int == BLKmode) + /* It's probably bad to be here; a port should have an integer mode + that's the same size as anything of which it takes a SUBREG. */ + xmode_unit_int = xmode_unit; + } + else + xmode_unit_int = xmode_unit; + + nregs_xmode_unit = hard_regno_nregs[xregno][xmode_unit]; + nregs_xmode_unit_int = hard_regno_nregs[xregno][xmode_unit_int]; + + /* If there are holes in a non-scalar mode in registers, we expect + that it is made up of its units concatenated together. */ + if (nregs_xmode_unit != nregs_xmode_unit_int) + { + gcc_assert (nregs_xmode_unit * GET_MODE_NUNITS (xmode) + == hard_regno_nregs[xregno][xmode]); + + /* You can only ask for a SUBREG of a value with holes in the middle + if you don't cross the holes. (Such a SUBREG should be done by + picking a different register class, or doing it in memory if + necessary.) An example of a value with holes is XCmode on 32-bit + x86 with -m128bit-long-double; it's represented in 6 32-bit registers, + 3 for each part, but in memory it's two 128-bit parts. + Padding is assumed to be at the end (not necessarily the 'high part') + of each unit. */ + if (nregs_xmode_unit != nregs_xmode_unit_int + && (offset / GET_MODE_SIZE (xmode_unit_int) + 1 + < GET_MODE_NUNITS (xmode)) + && (offset / GET_MODE_SIZE (xmode_unit_int) + != ((offset + GET_MODE_SIZE (ymode) - 1) + / GET_MODE_SIZE (xmode_unit_int)))) + return false; + + nregs_xmode = nregs_xmode_unit_int * GET_MODE_NUNITS (xmode); + } + else + nregs_xmode = hard_regno_nregs[xregno][xmode]; + nregs_ymode = hard_regno_nregs[xregno][ymode]; - /* Paradoxical subregs are always valid. */ + /* Paradoxical subregs are otherwise valid. */ if (offset == 0 && nregs_ymode > nregs_xmode && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)) return true; - /* Lowpart subregs are always valid. */ + /* Lowpart subregs are otherwise valid. */ if (offset == subreg_lowpart_offset (ymode, xmode)) return true; - /* This should always pass, otherwise we don't know how to verify the - constraint. These conditions may be relaxed but subreg_offset would - need to be redesigned. */ + /* This should always pass, otherwise we don't know how to verify + the constraint. These conditions may be relaxed but + subreg_regno_offset would need to be redesigned. 
*/ gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0); - gcc_assert ((GET_MODE_SIZE (ymode) % nregs_ymode) == 0); gcc_assert ((nregs_xmode % nregs_ymode) == 0); /* The XMODE value can be seen as a vector of NREGS_XMODE @@ -3096,7 +3076,7 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, / nregs_xmode, MODE_INT, 0)); - /* size of ymode must not be greater than the size of xmode. */ + /* Size of ymode must not be greater than the size of xmode. */ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); gcc_assert (mode_multiple != 0); @@ -3144,12 +3124,15 @@ parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) } /* Look backward for first parameter to be loaded. + Note that loads of all parameters will not necessarily be + found if CSE has eliminated some of them (e.g., an argument + to the outer function is passed down as a parameter). Do not skip BOUNDARY. */ rtx find_first_parameter_load (rtx call_insn, rtx boundary) { struct parms_set_data parm; - rtx p, before; + rtx p, before, first_set; /* Since different machines initialize their parameter registers in different orders, assume nothing. Collect the set of all @@ -3171,6 +3154,7 @@ find_first_parameter_load (rtx call_insn, rtx boundary) parm.nregs++; } before = call_insn; + first_set = call_insn; /* Search backward for the first set of a register in this set. */ while (parm.nregs && before != boundary) @@ -3193,9 +3177,20 @@ find_first_parameter_load (rtx call_insn, rtx boundary) } if (INSN_P (before)) - note_stores (PATTERN (before), parms_set, &parm); + { + int nregs_old = parm.nregs; + note_stores (PATTERN (before), parms_set, &parm); + /* If we found something that did not set a parameter reg, + we're done. Do not keep going, as that might result + in hoisting an insn before the setting of a pseudo + that is used by the hoisted insn. */ + if (nregs_old != parm.nregs) + first_set = before; + else + break; + } } - return before; + return first_set; } /* Return true if we should avoid inserting code between INSN and preceding @@ -3291,7 +3286,7 @@ rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED) total = COSTS_N_INSNS (7); break; case USE: - /* Used in loop.c and combine.c as a marker. */ + /* Used in combine.c as a marker. */ total = 0; break; default: @@ -3304,6 +3299,7 @@ rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED) return 0; case SUBREG: + total = 0; /* If we can't tie these modes, make this expensive. The larger the mode, the more expensive it is. */ if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x)))) @@ -3548,12 +3544,14 @@ nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x, case GE: case GEU: case UNGE: case LE: case LEU: case UNLE: case UNORDERED: case ORDERED: - /* If this produces an integer result, we know which bits are set. Code here used to clear bits outside the mode of X, but that is now done above. */ - - if (GET_MODE_CLASS (mode) == MODE_INT + /* Mind that MODE is the mode the caller wants to look at this + operation in, and not the actual operation mode. We can wind + up with (subreg:DI (gt:V4HI x y)), and we don't have anything + that describes the results of a vector compare. */ + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT && mode_width <= HOST_BITS_PER_WIDE_INT) nonzero = STORE_FLAG_VALUE; break; @@ -4333,3 +4331,410 @@ insn_rtx_cost (rtx pat) cost = rtx_cost (SET_SRC (set), SET); return cost > 0 ? 
cost : COSTS_N_INSNS (1);
 }
+
+/* Given an insn INSN and condition COND, return the condition in a
+   canonical form to simplify testing by callers.  Specifically:
+
+   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+   (2) Both operands will be machine operands; (cc0) will have been replaced.
+   (3) If an operand is a constant, it will be the second operand.
+   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+       for GE, GEU, and LEU.
+
+   If the condition cannot be understood, or is an inequality floating-point
+   comparison which needs to be reversed, 0 will be returned.
+
+   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
+   it.
+
+   If EARLIEST is nonzero, it is a pointer to a place where the earliest
+   insn used in locating the condition was found.  If a replacement test
+   of the condition is desired, it should be placed in front of that
+   insn and we will be sure that the inputs are still valid.
+
+   If WANT_REG is nonzero, we wish the condition to be relative to that
+   register, if possible.  Therefore, do not canonicalize the condition
+   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
+   to be a compare to a CC mode register.
+
+   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
+   and at INSN.  */
+
+rtx
+canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
+                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
+{
+  enum rtx_code code;
+  rtx prev = insn;
+  rtx set;
+  rtx tem;
+  rtx op0, op1;
+  int reverse_code = 0;
+  enum machine_mode mode;
+  basic_block bb = BLOCK_FOR_INSN (insn);
+
+  code = GET_CODE (cond);
+  mode = GET_MODE (cond);
+  op0 = XEXP (cond, 0);
+  op1 = XEXP (cond, 1);
+
+  if (reverse)
+    code = reversed_comparison_code (cond, insn);
+  if (code == UNKNOWN)
+    return 0;
+
+  if (earliest)
+    *earliest = insn;
+
+  /* If we are comparing a register with zero, see if the register is set
+     in the previous insn to a COMPARE or a comparison operation.  Perform
+     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
+     in cse.c.  */
+
+  while ((GET_RTX_CLASS (code) == RTX_COMPARE
+          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+         && op1 == CONST0_RTX (GET_MODE (op0))
+         && op0 != want_reg)
+    {
+      /* Set nonzero when we find something of interest.  */
+      rtx x = 0;
+
+#ifdef HAVE_cc0
+      /* If comparison with cc0, import actual comparison from compare
+         insn.  */
+      if (op0 == cc0_rtx)
+        {
+          if ((prev = prev_nonnote_insn (prev)) == 0
+              || !NONJUMP_INSN_P (prev)
+              || (set = single_set (prev)) == 0
+              || SET_DEST (set) != cc0_rtx)
+            return 0;
+
+          op0 = SET_SRC (set);
+          op1 = CONST0_RTX (GET_MODE (op0));
+          if (earliest)
+            *earliest = prev;
+        }
+#endif
+
+      /* If this is a COMPARE, pick up the two things being compared.  */
+      if (GET_CODE (op0) == COMPARE)
+        {
+          op1 = XEXP (op0, 1);
+          op0 = XEXP (op0, 0);
+          continue;
+        }
+      else if (!REG_P (op0))
+        break;
+
+      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
+         stop if it isn't a single set or if it has a REG_INC note because
+         we don't want to bother dealing with it.  */
+
+      if ((prev = prev_nonnote_insn (prev)) == 0
+          || !NONJUMP_INSN_P (prev)
+          || FIND_REG_INC_NOTE (prev, NULL_RTX)
+          /* In cfglayout mode, there do not have to be labels at the
+             beginning of a block, or jumps at the end, so the previous
+             conditions would not stop us when we reach bb boundary.
*/ + || BLOCK_FOR_INSN (prev) != bb) + break; + + set = set_of (op0, prev); + + if (set + && (GET_CODE (set) != SET + || !rtx_equal_p (SET_DEST (set), op0))) + break; + + /* If this is setting OP0, get what it sets it to if it looks + relevant. */ + if (set) + { + enum machine_mode inner_mode = GET_MODE (SET_DEST (set)); +#ifdef FLOAT_STORE_FLAG_VALUE + REAL_VALUE_TYPE fsfv; +#endif + + /* ??? We may not combine comparisons done in a CCmode with + comparisons not done in a CCmode. This is to aid targets + like Alpha that have an IEEE compliant EQ instruction, and + a non-IEEE compliant BEQ instruction. The use of CCmode is + actually artificial, simply to prevent the combination, but + should not affect other platforms. + + However, we must allow VOIDmode comparisons to match either + CCmode or non-CCmode comparison, because some ports have + modeless comparisons inside branch patterns. + + ??? This mode check should perhaps look more like the mode check + in simplify_comparison in combine. */ + + if ((GET_CODE (SET_SRC (set)) == COMPARE + || (((code == NE + || (code == LT + && GET_MODE_CLASS (inner_mode) == MODE_INT + && (GET_MODE_BITSIZE (inner_mode) + <= HOST_BITS_PER_WIDE_INT) + && (STORE_FLAG_VALUE + & ((HOST_WIDE_INT) 1 + << (GET_MODE_BITSIZE (inner_mode) - 1)))) +#ifdef FLOAT_STORE_FLAG_VALUE + || (code == LT + && SCALAR_FLOAT_MODE_P (inner_mode) + && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), + REAL_VALUE_NEGATIVE (fsfv))) +#endif + )) + && COMPARISON_P (SET_SRC (set)))) + && (((GET_MODE_CLASS (mode) == MODE_CC) + == (GET_MODE_CLASS (inner_mode) == MODE_CC)) + || mode == VOIDmode || inner_mode == VOIDmode)) + x = SET_SRC (set); + else if (((code == EQ + || (code == GE + && (GET_MODE_BITSIZE (inner_mode) + <= HOST_BITS_PER_WIDE_INT) + && GET_MODE_CLASS (inner_mode) == MODE_INT + && (STORE_FLAG_VALUE + & ((HOST_WIDE_INT) 1 + << (GET_MODE_BITSIZE (inner_mode) - 1)))) +#ifdef FLOAT_STORE_FLAG_VALUE + || (code == GE + && SCALAR_FLOAT_MODE_P (inner_mode) + && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), + REAL_VALUE_NEGATIVE (fsfv))) +#endif + )) + && COMPARISON_P (SET_SRC (set)) + && (((GET_MODE_CLASS (mode) == MODE_CC) + == (GET_MODE_CLASS (inner_mode) == MODE_CC)) + || mode == VOIDmode || inner_mode == VOIDmode)) + + { + reverse_code = 1; + x = SET_SRC (set); + } + else + break; + } + + else if (reg_set_p (op0, prev)) + /* If this sets OP0, but not directly, we have to give up. */ + break; + + if (x) + { + /* If the caller is expecting the condition to be valid at INSN, + make sure X doesn't change before INSN. */ + if (valid_at_insn_p) + if (modified_in_p (x, prev) || modified_between_p (x, prev, insn)) + break; + if (COMPARISON_P (x)) + code = GET_CODE (x); + if (reverse_code) + { + code = reversed_comparison_code (x, prev); + if (code == UNKNOWN) + return 0; + reverse_code = 0; + } + + op0 = XEXP (x, 0), op1 = XEXP (x, 1); + if (earliest) + *earliest = prev; + } + } + + /* If constant is first, put it last. */ + if (CONSTANT_P (op0)) + code = swap_condition (code), tem = op0, op0 = op1, op1 = tem; + + /* If OP0 is the result of a comparison, we weren't able to find what + was really being compared, so fail. */ + if (!allow_cc_mode + && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) + return 0; + + /* Canonicalize any ordered comparison with integers involving equality + if we can do computations in the relevant mode and we do not + overflow. 
*/ + + if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC + && GET_CODE (op1) == CONST_INT + && GET_MODE (op0) != VOIDmode + && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT const_val = INTVAL (op1); + unsigned HOST_WIDE_INT uconst_val = const_val; + unsigned HOST_WIDE_INT max_val + = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); + + switch (code) + { + case LE: + if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) + code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0)); + break; + + /* When cross-compiling, const_val might be sign-extended from + BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ + case GE: + if ((HOST_WIDE_INT) (const_val & max_val) + != (((HOST_WIDE_INT) 1 + << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) + code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); + break; + + case LEU: + if (uconst_val < max_val) + code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0)); + break; + + case GEU: + if (uconst_val != 0) + code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0)); + break; + + default: + break; + } + } + + /* Never return CC0; return zero instead. */ + if (CC0_P (op0)) + return 0; + + return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); +} + +/* Given a jump insn JUMP, return the condition that will cause it to branch + to its JUMP_LABEL. If the condition cannot be understood, or is an + inequality floating-point comparison which needs to be reversed, 0 will + be returned. + + If EARLIEST is nonzero, it is a pointer to a place where the earliest + insn used in locating the condition was found. If a replacement test + of the condition is desired, it should be placed in front of that + insn and we will be sure that the inputs are still valid. If EARLIEST + is null, the returned condition will be valid at INSN. + + If ALLOW_CC_MODE is nonzero, allow the condition returned to be a + compare CC mode register. + + VALID_AT_INSN_P is the same as for canonicalize_condition. */ + +rtx +get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p) +{ + rtx cond; + int reverse; + rtx set; + + /* If this is not a standard conditional jump, we can't parse it. */ + if (!JUMP_P (jump) + || ! any_condjump_p (jump)) + return 0; + set = pc_set (jump); + + cond = XEXP (SET_SRC (set), 0); + + /* If this branches to JUMP_LABEL when the condition is false, reverse + the condition. */ + reverse + = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF + && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump); + + return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX, + allow_cc_mode, valid_at_insn_p); +} + +/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on + TARGET_MODE_REP_EXTENDED. + + Note that we assume that the property of + TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes + narrower than mode B. I.e., if A is a mode narrower than B then in + order to be able to operate on it in mode B, mode A needs to + satisfy the requirements set by the representation of mode B. */ + +static void +init_num_sign_bit_copies_in_rep (void) +{ + enum machine_mode mode, in_mode; + + for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode; + in_mode = GET_MODE_WIDER_MODE (mode)) + for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode; + mode = GET_MODE_WIDER_MODE (mode)) + { + enum machine_mode i; + + /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED + extends to the next widest mode. 
*/ + gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN + || GET_MODE_WIDER_MODE (mode) == in_mode); + + /* We are in in_mode. Count how many bits outside of mode + have to be copies of the sign-bit. */ + for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i)) + { + enum machine_mode wider = GET_MODE_WIDER_MODE (i); + + if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND + /* We can only check sign-bit copies starting from the + top-bit. In order to be able to check the bits we + have already seen we pretend that subsequent bits + have to be sign-bit copies too. */ + || num_sign_bit_copies_in_rep [in_mode][mode]) + num_sign_bit_copies_in_rep [in_mode][mode] + += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i); + } + } +} + +/* Suppose that truncation from the machine mode of X to MODE is not a + no-op. See if there is anything special about X so that we can + assume it already contains a truncated value of MODE. */ + +bool +truncated_to_mode (enum machine_mode mode, rtx x) +{ + /* This register has already been used in MODE without explicit + truncation. */ + if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x)) + return true; + + /* See if we already satisfy the requirements of MODE. If yes we + can just switch to MODE. */ + if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + && (num_sign_bit_copies (x, GET_MODE (x)) + >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1)) + return true; + + return false; +} + +/* Initialize non_rtx_starting_operands, which is used to speed up + for_each_rtx. */ +void +init_rtlanal (void) +{ + int i; + for (i = 0; i < NUM_RTX_CODE; i++) + { + const char *format = GET_RTX_FORMAT (i); + const char *first = strpbrk (format, "eEV"); + non_rtx_starting_operands[i] = first ? first - format : -1; + } + + init_num_sign_bit_copies_in_rep (); +} + +/* Check whether this is a constant pool constant. */ +bool +constant_pool_constant_p (rtx x) +{ + x = avoid_constant_pool_reference (x); + return GET_CODE (x) == CONST_DOUBLE; +} +
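
The heart of the for_each_rtx speedup in this patch is the table filled in by init_rtlanal above: for each rtx code, strpbrk finds the offset of the first 'e', 'E' or 'V' operand in that code's format string, and for_each_rtx/for_each_rtx_1 then jump straight to that operand (or stop immediately on -1) instead of re-scanning the format string at every node. The following self-contained C sketch illustrates just that precomputation outside of GCC; toy_format is an invented miniature stand-in for GCC's rtx_format[] table, and first_rtx_operand plays the role of non_rtx_starting_operands.

/* Standalone sketch (not part of the patch) of the operand-offset
   precomputation done by init_rtlanal for for_each_rtx.  */

#include <stdio.h>
#include <string.h>

/* Invented miniature stand-in for GCC's rtx_format[]: one format string
   per rtx code, where 'e' is a sub-expression, 'E' (or 'V') a vector of
   sub-expressions, and any other letter a leaf operand.  */
static const char *const toy_format[] = {
  "w",   /* like CONST_INT: a wide-int leaf, no sub-rtx at all */
  "ee",  /* like PLUS: two sub-expressions, the first at offset 0 */
  "iee", /* an int leaf followed by two sub-expressions */
  "sE",  /* a string leaf followed by a vector of sub-expressions */
};
#define TOY_NUM_CODES (sizeof (toy_format) / sizeof (toy_format[0]))

/* Counterpart of non_rtx_starting_operands: offset of the first
   'e'/'E'/'V' operand for each code, or -1 if there is none.  */
static int first_rtx_operand[TOY_NUM_CODES];

static void
init_first_rtx_operand (void)
{
  size_t i;
  for (i = 0; i < TOY_NUM_CODES; i++)
    {
      const char *first = strpbrk (toy_format[i], "eEV");
      first_rtx_operand[i] = first ? (int) (first - toy_format[i]) : -1;
    }
}

int
main (void)
{
  size_t i;
  init_first_rtx_operand ();
  for (i = 0; i < TOY_NUM_CODES; i++)
    printf ("format \"%s\": first sub-rtx operand at %d\n",
            toy_format[i], first_rtx_operand[i]);
  return 0;
}

Compiled on its own, this prints offsets -1, 0, 1 and 1 for the four toy formats; the -1 entries are what let the real traversal cut off recursion at leaf codes such as CONST_INT without ever touching their format strings.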