X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Frecog.c;h=1b81704d5375dd19395e6e7e10f838fe9f9e3ad1;hb=07c46636ce59515e30fddc0217bcb7dfe31173ba;hp=6a20e86c224c95949e51c7535ade505faef44d1f;hpb=95e5c2053e45d6b707479c19acd0d3f8a58c1610;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/recog.c b/gcc/recog.c index 6a20e86c224..1b81704d537 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -1,6 +1,7 @@ /* Subroutines used by or related to instruction recognition. Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998 - 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. This file is part of GCC. @@ -16,8 +17,8 @@ for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA +02110-1301, USA. */ #include "config.h" @@ -31,6 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "hard-reg-set.h" #include "recog.h" #include "regs.h" +#include "addresses.h" #include "expr.h" #include "function.h" #include "flags.h" @@ -39,6 +41,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "basic-block.h" #include "output.h" #include "reload.h" +#include "timevar.h" +#include "tree-pass.h" #ifndef STACK_PUSH_CODE #ifdef STACK_GROWS_DOWNWARD @@ -56,10 +60,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #endif #endif -static void validate_replace_rtx_1 PARAMS ((rtx *, rtx, rtx, rtx)); -static rtx *find_single_use_1 PARAMS ((rtx, rtx *)); -static void validate_replace_src_1 PARAMS ((rtx *, void *)); -static rtx split_insn PARAMS ((rtx)); +static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx); +static rtx *find_single_use_1 (rtx, rtx *); +static void validate_replace_src_1 (rtx *, void *); +static rtx split_insn (rtx); /* Nonzero means allow operands to be volatile. This should be 0 if you are generating rtl, such as if you are calling @@ -88,46 +92,31 @@ int which_alternative; int reload_completed; +/* Nonzero after thread_prologue_and_epilogue_insns has run. */ +int epilogue_completed; + /* Initialize data used by the function `recog'. This must be called once in the compilation of a function before any insn recognition may be done in the function. */ void -init_recog_no_volatile () +init_recog_no_volatile (void) { volatile_ok = 0; } void -init_recog () +init_recog (void) { volatile_ok = 1; } -/* Try recognizing the instruction INSN, - and return the code number that results. - Remember the code so that repeated calls do not - need to spend the time for actual rerecognition. - - This function is the normal interface to instruction recognition. - The automatically-generated function `recog' is normally called - through this one. (The only exception is in combine.c.) */ - -int -recog_memoized_1 (insn) - rtx insn; -{ - if (INSN_CODE (insn) < 0) - INSN_CODE (insn) = recog (PATTERN (insn), insn, 0); - return INSN_CODE (insn); -} /* Check that X is an insn-body for an `asm' with operands and that the operands mentioned in it are legitimate. 
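The removal of recog_memoized_1 above does not lose functionality: the memoizing wrapper moved out of this file (recog.h carries recog_memoized as an inline). A minimal sketch of the same caching idiom, reproducing the deleted body:

    /* Recognize INSN once, cache the result in INSN_CODE, and reuse
       the cached code on subsequent calls.  */
    static int
    recog_memoized_sketch (rtx insn)
    {
      if (INSN_CODE (insn) < 0)
        INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
      return INSN_CODE (insn);
    }
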
*/ int -check_asm_operands (x) - rtx x; +check_asm_operands (rtx x) { int noperands; rtx *operands; @@ -149,10 +138,10 @@ check_asm_operands (x) if (noperands == 0) return 1; - operands = (rtx *) alloca (noperands * sizeof (rtx)); - constraints = (const char **) alloca (noperands * sizeof (char *)); + operands = alloca (noperands * sizeof (rtx)); + constraints = alloca (noperands * sizeof (char *)); - decode_asm_operands (x, operands, NULL, constraints, NULL); + decode_asm_operands (x, operands, NULL, constraints, NULL, NULL); for (i = 0; i < noperands; i++) { @@ -203,19 +192,14 @@ static int num_changes = 0; Otherwise, perform the change and return 1. */ int -validate_change (object, loc, new, in_group) - rtx object; - rtx *loc; - rtx new; - int in_group; +validate_change (rtx object, rtx *loc, rtx new, int in_group) { rtx old = *loc; if (old == new || rtx_equal_p (old, new)) return 1; - if (in_group == 0 && num_changes != 0) - abort (); + gcc_assert (in_group != 0 || num_changes == 0); *loc = new; @@ -229,16 +213,14 @@ validate_change (object, loc, new, in_group) else changes_allocated *= 2; - changes = - (change_t*) xrealloc (changes, - sizeof (change_t) * changes_allocated); + changes = xrealloc (changes, sizeof (change_t) * changes_allocated); } changes[num_changes].object = object; changes[num_changes].loc = loc; changes[num_changes].old = old; - if (object && GET_CODE (object) != MEM) + if (object && !MEM_P (object)) { /* Set INSN_CODE to force rerecognition of insn. Save old code in case invalid. */ @@ -257,12 +239,34 @@ validate_change (object, loc, new, in_group) return apply_change_group (); } +/* Keep X canonicalized if some changes have made it non-canonical; only + modifies the operands of X, not (for example) its code. Simplifications + are not the job of this routine. + + Return true if anything was changed. */ +bool +canonicalize_change_group (rtx insn, rtx x) +{ + if (COMMUTATIVE_P (x) + && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) + { + /* Oops, the caller has made X no longer canonical. + Let's redo the changes in the correct order. */ + rtx tem = XEXP (x, 0); + validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1); + validate_change (insn, &XEXP (x, 1), tem, 1); + return true; + } + else + return false; +} + + /* This subroutine of apply_change_group verifies whether the changes to INSN were valid; i.e. whether INSN can still be recognized. */ int -insn_invalid_p (insn) - rtx insn; +insn_invalid_p (rtx insn) { rtx pat = PATTERN (insn); int num_clobbers = 0; @@ -312,16 +316,16 @@ insn_invalid_p (insn) /* Return number of changes made and not validated yet. */ int -num_changes_pending () +num_changes_pending (void) { return num_changes; } -/* Apply a group of changes previously issued with `validate_change'. +/* Tentatively apply the changes numbered NUM and up. Return 1 if all changes are valid, zero otherwise. */ int -apply_change_group () +verify_changes (int num) { int i; rtx last_validated = NULL_RTX; @@ -335,16 +339,16 @@ apply_change_group () we also require that the operands meet the constraints for the insn. */ - for (i = 0; i < num_changes; i++) + for (i = num; i < num_changes; i++) { rtx object = changes[i].object; - /* if there is no object to test or if it is the same as the one we + /* If there is no object to test or if it is the same as the one we already tested, ignore it. */ if (object == 0 || object == last_validated) continue; - if (GET_CODE (object) == MEM) + if (MEM_P (object)) { if (! 
memory_address_p (GET_MODE (object), XEXP (object, 0))) break; @@ -399,17 +403,38 @@ apply_change_group () last_validated = object; } - if (i == num_changes) - { - basic_block bb; + return (i == num_changes); +} - for (i = 0; i < num_changes; i++) - if (changes[i].object - && INSN_P (changes[i].object) - && (bb = BLOCK_FOR_INSN (changes[i].object))) - bb->flags |= BB_DIRTY; +/* A group of changes has previously been issued with validate_change and + verified with verify_changes. Update the BB_DIRTY flags of the affected + blocks, and clear num_changes. */ - num_changes = 0; +void +confirm_change_group (void) +{ + int i; + basic_block bb; + + for (i = 0; i < num_changes; i++) + if (changes[i].object + && INSN_P (changes[i].object) + && (bb = BLOCK_FOR_INSN (changes[i].object))) + bb->flags |= BB_DIRTY; + + num_changes = 0; +} + +/* Apply a group of changes previously issued with `validate_change'. + If all changes are valid, call confirm_change_group and return 1, + otherwise, call cancel_changes and return 0. */ + +int +apply_change_group (void) +{ + if (verify_changes (0)) + { + confirm_change_group (); return 1; } else @@ -419,10 +444,11 @@ apply_change_group () } } + /* Return the number of changes so far in the current group. */ int -num_validated_changes () +num_validated_changes (void) { return num_changes; } @@ -430,8 +456,7 @@ num_validated_changes () /* Retract the changes numbered NUM and up. */ void -cancel_changes (num) - int num; +cancel_changes (int num) { int i; @@ -440,7 +465,7 @@ cancel_changes (num) for (i = num_changes - 1; i >= num; i--) { *changes[i].loc = changes[i].old; - if (changes[i].object && GET_CODE (changes[i].object) != MEM) + if (changes[i].object && !MEM_P (changes[i].object)) INSN_CODE (changes[i].object) = changes[i].old_code; } num_changes = num; @@ -450,9 +475,7 @@ cancel_changes (num) validate_change passing OBJECT. */ static void -validate_replace_rtx_1 (loc, from, to, object) - rtx *loc; - rtx from, to, object; +validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object) { int i, j; const char *fmt; @@ -475,7 +498,7 @@ validate_replace_rtx_1 (loc, from, to, object) operands look similar. */ if (x == from - || (GET_CODE (x) == REG && GET_CODE (from) == REG + || (REG_P (x) && REG_P (from) && GET_MODE (x) == GET_MODE (from) && REGNO (x) == REGNO (from)) || (GET_CODE (x) == GET_CODE (from) && GET_MODE (x) == GET_MODE (from) @@ -485,16 +508,38 @@ validate_replace_rtx_1 (loc, from, to, object) return; } - /* Call ourself recursively to perform the replacements. */ + /* Call ourself recursively to perform the replacements. + We must not replace inside already replaced expression, otherwise we + get infinite recursion for replacements like (reg X)->(subreg (reg X)) + done by regmove, so we must special case shared ASM_OPERANDS. */ - for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + if (GET_CODE (x) == PARALLEL) { - if (fmt[i] == 'e') - validate_replace_rtx_1 (&XEXP (x, i), from, to, object); - else if (fmt[i] == 'E') - for (j = XVECLEN (x, i) - 1; j >= 0; j--) - validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object); + for (j = XVECLEN (x, 0) - 1; j >= 0; j--) + { + if (j && GET_CODE (XVECEXP (x, 0, j)) == SET + && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == ASM_OPERANDS) + { + /* Verify that operands are really shared. 
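The verify_changes / confirm_change_group split above makes the grouped-change protocol usable piecemeal, while apply_change_group remains the all-in-one entry point. A minimal sketch of a hypothetical caller (the insn and the two operand locations are placeholders):

    /* Queue two operand replacements as one group; INSN is
       re-recognized once for the whole group.  On failure,
       apply_change_group calls cancel_changes and every queued
       change is undone.  */
    static int
    try_swap_operands (rtx insn, rtx *loc0, rtx *loc1)
    {
      rtx tem = *loc0;
      validate_change (insn, loc0, *loc1, 1);  /* in_group=1: queue only.  */
      validate_change (insn, loc1, tem, 1);
      return apply_change_group ();
    }
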
*/ + gcc_assert (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0))) + == ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP + (x, 0, j)))); + validate_replace_rtx_1 (&SET_DEST (XVECEXP (x, 0, j)), + from, to, object); + } + else + validate_replace_rtx_1 (&XVECEXP (x, 0, j), from, to, object); + } } + else + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + { + if (fmt[i] == 'e') + validate_replace_rtx_1 (&XEXP (x, i), from, to, object); + else if (fmt[i] == 'E') + for (j = XVECLEN (x, i) - 1; j >= 0; j--) + validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object); + } /* If we didn't substitute, there is nothing more to do. */ if (num_changes == prev_changes) @@ -508,11 +553,11 @@ validate_replace_rtx_1 (loc, from, to, object) /* Do changes needed to keep rtx consistent. Don't do any other simplifications, as it is not our job. */ - if ((GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == 'c') + if (SWAPPABLE_OPERANDS_P (x) && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) { validate_change (object, loc, - gen_rtx_fmt_ee (GET_RTX_CLASS (code) == 'c' ? code + gen_rtx_fmt_ee (COMMUTATIVE_ARITH_P (x) ? code : swap_condition (code), GET_MODE (x), XEXP (x, 1), XEXP (x, 0)), 1); @@ -573,7 +618,7 @@ validate_replace_rtx_1 (loc, from, to, object) likely to be an insertion operation; if it was, nothing bad will happen, we might just fail in some cases). */ - if (GET_CODE (XEXP (x, 0)) == MEM + if (MEM_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) == CONST_INT && GET_CODE (XEXP (x, 2)) == CONST_INT && !mode_dependent_address_p (XEXP (XEXP (x, 0), 0)) @@ -628,24 +673,11 @@ validate_replace_rtx_1 (loc, from, to, object) } } -/* Try replacing every occurrence of FROM in subexpression LOC of INSN - with TO. After all changes have been made, validate by seeing - if INSN is still valid. */ - -int -validate_replace_rtx_subexp (from, to, insn, loc) - rtx from, to, insn, *loc; -{ - validate_replace_rtx_1 (loc, from, to, insn); - return apply_change_group (); -} - /* Try replacing every occurrence of FROM in INSN with TO. After all changes have been made, validate by seeing if INSN is still valid. */ int -validate_replace_rtx (from, to, insn) - rtx from, to, insn; +validate_replace_rtx (rtx from, rtx to, rtx insn) { validate_replace_rtx_1 (&PATTERN (insn), from, to, insn); return apply_change_group (); @@ -654,8 +686,7 @@ validate_replace_rtx (from, to, insn) /* Try replacing every occurrence of FROM in INSN with TO. */ void -validate_replace_rtx_group (from, to, insn) - rtx from, to, insn; +validate_replace_rtx_group (rtx from, rtx to, rtx insn) { validate_replace_rtx_1 (&PATTERN (insn), from, to, insn); } @@ -669,9 +700,7 @@ struct validate_replace_src_data }; static void -validate_replace_src_1 (x, data) - rtx *x; - void *data; +validate_replace_src_1 (rtx *x, void *data) { struct validate_replace_src_data *d = (struct validate_replace_src_data *) data; @@ -683,8 +712,7 @@ validate_replace_src_1 (x, data) SET_DESTs. */ void -validate_replace_src_group (from, to, insn) - rtx from, to, insn; +validate_replace_src_group (rtx from, rtx to, rtx insn) { struct validate_replace_src_data d; @@ -694,14 +722,44 @@ validate_replace_src_group (from, to, insn) note_uses (&PATTERN (insn), validate_replace_src_1, &d); } -/* Same as validate_replace_src_group, but validate by seeing if - INSN is still valid. */ -int -validate_replace_src (from, to, insn) - rtx from, to, insn; +/* Try simplify INSN. 
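validate_replace_rtx and the _group variants above all funnel into validate_replace_rtx_1; only the validation step differs. A hedged usage sketch (the pseudo-register, value, and helper name are hypothetical):

    /* Try to fold a known constant into every use of REG inside INSN,
       keeping the change only if INSN still matches its pattern.  */
    static int
    fold_constant_into_insn (rtx insn, rtx reg, HOST_WIDE_INT val)
    {
      return validate_replace_rtx (reg, GEN_INT (val), insn);
    }
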
+ Invoke simplify_rtx () on every SET_SRC and SET_DEST inside the INSN's + pattern and return true if something was simplified. */ + +bool +validate_simplify_insn (rtx insn) { - validate_replace_src_group (from, to, insn); - return apply_change_group (); + int i; + rtx pat = NULL; + rtx newpat = NULL; + + pat = PATTERN (insn); + + if (GET_CODE (pat) == SET) + { + newpat = simplify_rtx (SET_SRC (pat)); + if (newpat && !rtx_equal_p (SET_SRC (pat), newpat)) + validate_change (insn, &SET_SRC (pat), newpat, 1); + newpat = simplify_rtx (SET_DEST (pat)); + if (newpat && !rtx_equal_p (SET_DEST (pat), newpat)) + validate_change (insn, &SET_DEST (pat), newpat, 1); + } + else if (GET_CODE (pat) == PARALLEL) + for (i = 0; i < XVECLEN (pat, 0); i++) + { + rtx s = XVECEXP (pat, 0, i); + + if (GET_CODE (XVECEXP (pat, 0, i)) == SET) + { + newpat = simplify_rtx (SET_SRC (s)); + if (newpat && !rtx_equal_p (SET_SRC (s), newpat)) + validate_change (insn, &SET_SRC (s), newpat, 1); + newpat = simplify_rtx (SET_DEST (s)); + if (newpat && !rtx_equal_p (SET_DEST (s), newpat)) + validate_change (insn, &SET_DEST (s), newpat, 1); + } + } + return ((num_changes_pending () > 0) && (apply_change_group () > 0)); } #ifdef HAVE_cc0 @@ -710,8 +768,7 @@ validate_replace_src (from, to, insn) EQ and NE tests do not count. */ int -next_insn_tests_no_inequality (insn) - rtx insn; +next_insn_tests_no_inequality (rtx insn) { rtx next = next_cc0_user (insn); @@ -719,39 +776,9 @@ next_insn_tests_no_inequality (insn) if (next == 0) return 0; - return ((GET_CODE (next) == JUMP_INSN - || GET_CODE (next) == INSN - || GET_CODE (next) == CALL_INSN) + return (INSN_P (next) && ! inequality_comparisons_p (PATTERN (next))); } - -#if 0 /* This is useless since the insn that sets the cc's - must be followed immediately by the use of them. */ -/* Return 1 if the CC value set up by INSN is not used. */ - -int -next_insns_test_no_inequality (insn) - rtx insn; -{ - rtx next = NEXT_INSN (insn); - - for (; next != 0; next = NEXT_INSN (next)) - { - if (GET_CODE (next) == CODE_LABEL - || GET_CODE (next) == BARRIER) - return 1; - if (GET_CODE (next) == NOTE) - continue; - if (inequality_comparisons_p (PATTERN (next))) - return 0; - if (sets_cc0_p (PATTERN (next)) == 1) - return 1; - if (! reg_mentioned_p (cc0_rtx, PATTERN (next))) - return 1; - } - return 1; -} -#endif #endif /* This is used by find_single_use to locate an rtx that contains exactly one @@ -760,9 +787,7 @@ next_insns_test_no_inequality (insn) DEST that are being used to totally replace it are not counted. */ static rtx * -find_single_use_1 (dest, loc) - rtx dest; - rtx *loc; +find_single_use_1 (rtx dest, rtx *loc) { rtx x = *loc; enum rtx_code code = GET_CODE (x); @@ -789,9 +814,9 @@ find_single_use_1 (dest, loc) need just check the source. */ if (GET_CODE (SET_DEST (x)) != CC0 && GET_CODE (SET_DEST (x)) != PC - && GET_CODE (SET_DEST (x)) != REG + && !REG_P (SET_DEST (x)) && ! 
(GET_CODE (SET_DEST (x)) == SUBREG - && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG + && REG_P (SUBREG_REG (SET_DEST (x))) && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x)))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x))) @@ -817,7 +842,7 @@ find_single_use_1 (dest, loc) if (fmt[i] == 'e') { if (dest == XEXP (x, i) - || (GET_CODE (dest) == REG && GET_CODE (XEXP (x, i)) == REG + || (REG_P (dest) && REG_P (XEXP (x, i)) && REGNO (dest) == REGNO (XEXP (x, i)))) this_result = loc; else @@ -836,8 +861,8 @@ find_single_use_1 (dest, loc) for (j = XVECLEN (x, i) - 1; j >= 0; j--) { if (XVECEXP (x, i, j) == dest - || (GET_CODE (dest) == REG - && GET_CODE (XVECEXP (x, i, j)) == REG + || (REG_P (dest) + && REG_P (XVECEXP (x, i, j)) && REGNO (XVECEXP (x, i, j)) == REGNO (dest))) this_result = loc; else @@ -873,10 +898,7 @@ find_single_use_1 (dest, loc) and last insn referencing DEST. */ rtx * -find_single_use (dest, insn, ploc) - rtx dest; - rtx insn; - rtx *ploc; +find_single_use (rtx dest, rtx insn, rtx *ploc) { rtx next; rtx *result; @@ -887,7 +909,7 @@ find_single_use (dest, insn, ploc) { next = NEXT_INSN (insn); if (next == 0 - || (GET_CODE (next) != INSN && GET_CODE (next) != JUMP_INSN)) + || (!NONJUMP_INSN_P (next) && !JUMP_P (next))) return 0; result = find_single_use_1 (dest, &PATTERN (next)); @@ -897,11 +919,11 @@ find_single_use (dest, insn, ploc) } #endif - if (reload_completed || reload_in_progress || GET_CODE (dest) != REG) + if (reload_completed || reload_in_progress || !REG_P (dest)) return 0; for (next = next_nonnote_insn (insn); - next != 0 && GET_CODE (next) != CODE_LABEL; + next != 0 && !LABEL_P (next); next = next_nonnote_insn (next)) if (INSN_P (next) && dead_or_set_p (next, dest)) { @@ -939,9 +961,7 @@ find_single_use (dest, insn, ploc) class NO_REGS, see the comment for `register_operand'. */ int -general_operand (op, mode) - rtx op; - enum machine_mode mode; +general_operand (rtx op, enum machine_mode mode) { enum rtx_code code = GET_CODE (op); @@ -963,9 +983,7 @@ general_operand (op, mode) if (CONSTANT_P (op)) return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode || mode == VOIDmode) -#ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) -#endif && LEGITIMATE_CONSTANT_P (op)); /* Except for certain constants with VOIDmode, already checked for, @@ -980,8 +998,10 @@ general_operand (op, mode) #ifdef INSN_SCHEDULING /* On machines that have insn scheduling, we want all memory - reference to be explicit, so outlaw paradoxical SUBREGs. */ - if (GET_CODE (sub) == MEM + reference to be explicit, so outlaw paradoxical SUBREGs. + However, we must allow them after reload so that they can + get cleaned up by cleanup_subreg_operands. */ + if (!reload_completed && MEM_P (sub) && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub))) return 0; #endif @@ -992,12 +1012,12 @@ general_operand (op, mode) ??? This is a kludge. */ if (!reload_completed && SUBREG_BYTE (op) != 0 - && GET_CODE (sub) == MEM) + && MEM_P (sub)) return 0; /* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally - create such rtl, and we must reject it. */ - if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT + create such rtl, and we must reject it. */ + if (SCALAR_FLOAT_MODE_P (GET_MODE (op)) && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub))) return 0; @@ -1017,23 +1037,12 @@ general_operand (op, mode) if (! 
volatile_ok && MEM_VOLATILE_P (op)) return 0; - if (GET_CODE (y) == ADDRESSOF) - return 1; - /* Use the mem's mode, since it will be reloaded thus. */ - mode = GET_MODE (op); - GO_IF_LEGITIMATE_ADDRESS (mode, y, win); + if (memory_address_p (GET_MODE (op), y)) + return 1; } - /* Pretend this is an operand for now; we'll run force_operand - on its replacement in fixup_var_refs_1. */ - if (code == ADDRESSOF) - return 1; - return 0; - - win: - return 1; } /* Return 1 if OP is a valid memory address for a memory reference @@ -1043,9 +1052,7 @@ general_operand (op, mode) expressions in the machine description. */ int -address_operand (op, mode) - rtx op; - enum machine_mode mode; +address_operand (rtx op, enum machine_mode mode) { return memory_address_p (mode, op); } @@ -1065,9 +1072,7 @@ address_operand (op, mode) it is most consistent to keep this function from accepting them. */ int -register_operand (op, mode) - rtx op; - enum machine_mode mode; +register_operand (rtx op, enum machine_mode mode) { if (GET_MODE (op) != mode && mode != VOIDmode) return 0; @@ -1082,13 +1087,13 @@ register_operand (op, mode) (Ideally, (SUBREG (MEM)...) should not exist after reload, but currently it does result from (SUBREG (REG)...) where the reg went on the stack.) */ - if (! reload_completed && GET_CODE (sub) == MEM) + if (! reload_completed && MEM_P (sub)) return general_operand (op, mode); #ifdef CANNOT_CHANGE_MODE_CLASS - if (GET_CODE (sub) == REG + if (REG_P (sub) && REGNO (sub) < FIRST_PSEUDO_REGISTER - && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), mode, GET_MODE (sub)) + && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode) && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT) return 0; @@ -1096,21 +1101,16 @@ register_operand (op, mode) /* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally create such rtl, and we must reject it. */ - if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT + if (SCALAR_FLOAT_MODE_P (GET_MODE (op)) && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub))) return 0; op = sub; } - /* If we have an ADDRESSOF, consider it valid since it will be - converted into something that will not be a MEM. */ - if (GET_CODE (op) == ADDRESSOF) - return 1; - /* We don't consider registers whose class is NO_REGS to be a register operand. */ - return (GET_CODE (op) == REG + return (REG_P (op) && (REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); } @@ -1118,9 +1118,7 @@ register_operand (op, mode) /* Return 1 for a register in Pmode; ignore the tested mode. */ int -pmode_register_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +pmode_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return register_operand (op, Pmode); } @@ -1129,15 +1127,13 @@ pmode_register_operand (op, mode) or a hard register. */ int -scratch_operand (op, mode) - rtx op; - enum machine_mode mode; +scratch_operand (rtx op, enum machine_mode mode) { if (GET_MODE (op) != mode && mode != VOIDmode) return 0; return (GET_CODE (op) == SCRATCH - || (GET_CODE (op) == REG + || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER)); } @@ -1147,9 +1143,7 @@ scratch_operand (op, mode) expressions in the machine description. */ int -immediate_operand (op, mode) - rtx op; - enum machine_mode mode; +immediate_operand (rtx op, enum machine_mode mode) { /* Don't accept CONST_INT or anything similar if the caller wants something floating. 
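Predicates such as general_operand and register_operand are the usual gatekeepers in expander code; anything general_operand accepts can be copied into a fresh pseudo. A small sketch under that assumption (the helper name is hypothetical; copy_to_mode_reg is the standard emit-time utility):

    /* Ensure OP is a register operand of MODE, copying through a new
       pseudo when the predicate rejects it (e.g. for a MEM).  */
    static rtx
    force_reg_operand (rtx op, enum machine_mode mode)
    {
      if (register_operand (op, mode))
        return op;
      gcc_assert (general_operand (op, mode));
      return copy_to_mode_reg (mode, op);
    }
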
*/ @@ -1163,27 +1157,17 @@ immediate_operand (op, mode) && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op)) return 0; - /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and - result in 0/1. It seems a safe assumption that this is - in range for everyone. */ - if (GET_CODE (op) == CONSTANT_P_RTX) - return 1; - return (CONSTANT_P (op) && (GET_MODE (op) == mode || mode == VOIDmode || GET_MODE (op) == VOIDmode) -#ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) -#endif && LEGITIMATE_CONSTANT_P (op)); } /* Returns 1 if OP is an operand that is a CONST_INT. */ int -const_int_operand (op, mode) - rtx op; - enum machine_mode mode; +const_int_operand (rtx op, enum machine_mode mode) { if (GET_CODE (op) != CONST_INT) return 0; @@ -1199,9 +1183,7 @@ const_int_operand (op, mode) floating-point number. */ int -const_double_operand (op, mode) - rtx op; - enum machine_mode mode; +const_double_operand (rtx op, enum machine_mode mode) { /* Don't accept CONST_INT or anything similar if the caller wants something floating. */ @@ -1218,9 +1200,7 @@ const_double_operand (op, mode) /* Return 1 if OP is a general operand that is not an immediate operand. */ int -nonimmediate_operand (op, mode) - rtx op; - enum machine_mode mode; +nonimmediate_operand (rtx op, enum machine_mode mode) { return (general_operand (op, mode) && ! CONSTANT_P (op)); } @@ -1228,9 +1208,7 @@ nonimmediate_operand (op, mode) /* Return 1 if OP is a register reference or immediate value of mode MODE. */ int -nonmemory_operand (op, mode) - rtx op; - enum machine_mode mode; +nonmemory_operand (rtx op, enum machine_mode mode) { if (CONSTANT_P (op)) { @@ -1248,9 +1226,7 @@ nonmemory_operand (op, mode) return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode || mode == VOIDmode) -#ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) -#endif && LEGITIMATE_CONSTANT_P (op)); } @@ -1265,14 +1241,14 @@ nonmemory_operand (op, mode) (Ideally, (SUBREG (MEM)...) should not exist after reload, but currently it does result from (SUBREG (REG)...) where the reg went on the stack.) */ - if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM) + if (! reload_completed && MEM_P (SUBREG_REG (op))) return general_operand (op, mode); op = SUBREG_REG (op); } /* We don't consider registers whose class is NO_REGS to be a register operand. */ - return (GET_CODE (op) == REG + return (REG_P (op) && (REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); } @@ -1284,9 +1260,7 @@ nonmemory_operand (op, mode) expressions in the machine description. */ int -push_operand (op, mode) - rtx op; - enum machine_mode mode; +push_operand (rtx op, enum machine_mode mode) { unsigned int rounded_size = GET_MODE_SIZE (mode); @@ -1294,7 +1268,7 @@ push_operand (op, mode) rounded_size = PUSH_ROUNDING (rounded_size); #endif - if (GET_CODE (op) != MEM) + if (!MEM_P (op)) return 0; if (mode != VOIDmode && GET_MODE (op) != mode) @@ -1316,7 +1290,7 @@ push_operand (op, mode) #ifdef STACK_GROWS_DOWNWARD || INTVAL (XEXP (XEXP (op, 1), 1)) != - (int) rounded_size #else - || INTVAL (XEXP (XEXP (op, 1), 1)) != rounded_size + || INTVAL (XEXP (XEXP (op, 1), 1)) != (int) rounded_size #endif ) return 0; @@ -1332,11 +1306,9 @@ push_operand (op, mode) expressions in the machine description. 
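push_operand, just above, accepts exactly two address shapes. For reference, a sketch constructing them with the standard gen_rtx macros (SImode and the -4 byte size are arbitrary, assuming a STACK_GROWS_DOWNWARD target):

    /* Plain auto-decrement push: (mem:SI (pre_dec:P sp)).  */
    rtx push1 = gen_rtx_MEM (SImode,
                             gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));

    /* PUSH_ROUNDING form:
       (mem:SI (pre_modify:P sp (plus:P sp (const_int -4)))).  */
    rtx push2
      = gen_rtx_MEM (SImode,
                     gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
                                         gen_rtx_PLUS (Pmode,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-4))));
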
*/ int -pop_operand (op, mode) - rtx op; - enum machine_mode mode; +pop_operand (rtx op, enum machine_mode mode) { - if (GET_CODE (op) != MEM) + if (!MEM_P (op)) return 0; if (mode != VOIDmode && GET_MODE (op) != mode) @@ -1353,13 +1325,8 @@ pop_operand (op, mode) /* Return 1 if ADDR is a valid memory address for mode MODE. */ int -memory_address_p (mode, addr) - enum machine_mode mode ATTRIBUTE_UNUSED; - rtx addr; +memory_address_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx addr) { - if (GET_CODE (addr) == ADDRESSOF) - return 1; - GO_IF_LEGITIMATE_ADDRESS (mode, addr, win); return 0; @@ -1374,16 +1341,14 @@ memory_address_p (mode, addr) expressions in the machine description. */ int -memory_operand (op, mode) - rtx op; - enum machine_mode mode; +memory_operand (rtx op, enum machine_mode mode) { rtx inner; if (! reload_completed) /* Note that no SUBREG is a memory operand before end of reload pass, because (SUBREG (MEM...)) forces reloading into a register. */ - return GET_CODE (op) == MEM && general_operand (op, mode); + return MEM_P (op) && general_operand (op, mode); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; @@ -1392,20 +1357,18 @@ memory_operand (op, mode) if (GET_CODE (inner) == SUBREG) inner = SUBREG_REG (inner); - return (GET_CODE (inner) == MEM && general_operand (op, mode)); + return (MEM_P (inner) && general_operand (op, mode)); } /* Return 1 if OP is a valid indirect memory reference with mode MODE; that is, a memory reference whose address is a general_operand. */ int -indirect_operand (op, mode) - rtx op; - enum machine_mode mode; +indirect_operand (rtx op, enum machine_mode mode) { /* Before reload, a SUBREG isn't in memory (see memory_operand, above). */ if (! reload_completed - && GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == MEM) + && GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op))) { int offset = SUBREG_BYTE (op); rtx inner = SUBREG_REG (op); @@ -1425,7 +1388,7 @@ indirect_operand (op, mode) && general_operand (XEXP (XEXP (inner, 0), 0), Pmode))); } - return (GET_CODE (op) == MEM + return (MEM_P (op) && memory_operand (op, mode) && general_operand (XEXP (op, 0), Pmode)); } @@ -1434,12 +1397,10 @@ indirect_operand (op, mode) MATCH_OPERATOR to recognize all the branch insns. */ int -comparison_operator (op, mode) - rtx op; - enum machine_mode mode; +comparison_operator (rtx op, enum machine_mode mode) { return ((mode == VOIDmode || GET_MODE (op) == mode) - && GET_RTX_CLASS (GET_CODE (op)) == '<'); + && COMPARISON_P (op)); } /* If BODY is an insn body that uses ASM_OPERANDS, @@ -1447,8 +1408,7 @@ comparison_operator (op, mode) Otherwise return -1. */ int -asm_noperands (body) - rtx body; +asm_noperands (rtx body) { switch (GET_CODE (body)) { @@ -1532,20 +1492,17 @@ asm_noperands (body) we don't store that info. */ const char * -decode_asm_operands (body, operands, operand_locs, constraints, modes) - rtx body; - rtx *operands; - rtx **operand_locs; - const char **constraints; - enum machine_mode *modes; +decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs, + const char **constraints, enum machine_mode *modes, + location_t *loc) { int i; int noperands; - const char *template = 0; + rtx asmop = 0; if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS) { - rtx asmop = SET_SRC (body); + asmop = SET_SRC (body); /* Single output operand: BODY is (set OUTPUT (asm_operands ....)). 
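The new sixth argument added to decode_asm_operands returns the asm's source location, and each out-parameter is consulted only when non-null. A hypothetical diagnostic helper that wants only the location might read:

    /* Fetch where the asm statement came from; the NULL arguments
       skip the operand, location, constraint and mode arrays.  */
    static location_t
    asm_source_location (rtx body)
    {
      location_t loc;
      decode_asm_operands (body, NULL, NULL, NULL, NULL, &loc);
      return loc;
    }
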
*/ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop) + 1; @@ -1572,11 +1529,10 @@ decode_asm_operands (body, operands, operand_locs, constraints, modes) constraints[0] = ASM_OPERANDS_OUTPUT_CONSTRAINT (asmop); if (modes) modes[0] = GET_MODE (SET_DEST (body)); - template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == ASM_OPERANDS) { - rtx asmop = body; + asmop = body; /* No output operands: BODY is (asm_operands ....). */ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop); @@ -1594,17 +1550,18 @@ decode_asm_operands (body, operands, operand_locs, constraints, modes) if (modes) modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i); } - template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS) { - rtx asmop = SET_SRC (XVECEXP (body, 0, 0)); int nparallel = XVECLEN (body, 0); /* Includes CLOBBERs. */ - int nin = ASM_OPERANDS_INPUT_LENGTH (asmop); + int nin; int nout = 0; /* Does not include CLOBBERs. */ + asmop = SET_SRC (XVECEXP (body, 0, 0)); + nin = ASM_OPERANDS_INPUT_LENGTH (asmop); + /* At least one output, plus some CLOBBERs. */ /* The outputs are in the SETs. @@ -1636,16 +1593,16 @@ decode_asm_operands (body, operands, operand_locs, constraints, modes) if (modes) modes[i + nout] = ASM_OPERANDS_INPUT_MODE (asmop, i); } - - template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) { /* No outputs, but some CLOBBERs. */ - rtx asmop = XVECEXP (body, 0, 0); - int nin = ASM_OPERANDS_INPUT_LENGTH (asmop); + int nin; + + asmop = XVECEXP (body, 0, 0); + nin = ASM_OPERANDS_INPUT_LENGTH (asmop); for (i = 0; i < nin; i++) { @@ -1659,25 +1616,31 @@ decode_asm_operands (body, operands, operand_locs, constraints, modes) modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i); } - template = ASM_OPERANDS_TEMPLATE (asmop); } - return template; + if (loc) + { +#ifdef USE_MAPPED_LOCATION + *loc = ASM_OPERANDS_SOURCE_LOCATION (asmop); +#else + loc->file = ASM_OPERANDS_SOURCE_FILE (asmop); + loc->line = ASM_OPERANDS_SOURCE_LINE (asmop); +#endif + } + + return ASM_OPERANDS_TEMPLATE (asmop); } -/* Check if an asm_operand matches it's constraints. +/* Check if an asm_operand matches its constraints. Return > 0 if ok, = 0 if bad, < 0 if inconclusive. */ int -asm_operand_ok (op, constraint) - rtx op; - const char *constraint; +asm_operand_ok (rtx op, const char *constraint) { int result = 0; /* Use constrain_operands after reload. */ - if (reload_completed) - abort (); + gcc_assert (!reload_completed); while (*constraint) { @@ -1735,7 +1698,7 @@ asm_operand_ok (op, constraint) Match any memory and hope things are resolved after reload. */ - if (GET_CODE (op) == MEM + if (MEM_P (op) && (1 || GET_CODE (XEXP (op, 0)) == PRE_DEC || GET_CODE (XEXP (op, 0)) == POST_DEC)) @@ -1743,7 +1706,7 @@ asm_operand_ok (op, constraint) break; case '>': - if (GET_CODE (op) == MEM + if (MEM_P (op) && (1 || GET_CODE (XEXP (op, 0)) == PRE_INC || GET_CODE (XEXP (op, 0)) == POST_INC)) @@ -1774,14 +1737,10 @@ asm_operand_ok (op, constraint) || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) break; - /* FALLTHRU */ + /* Fall through. */ case 'i': - if (CONSTANT_P (op) -#ifdef LEGITIMATE_PIC_OPERAND_P - && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) -#endif - ) + if (CONSTANT_P (op) && (! 
flag_pic || LEGITIMATE_PIC_OPERAND_P (op))) result = 1; break; @@ -1835,6 +1794,7 @@ asm_operand_ok (op, constraint) case 'X': result = 1; + break; case 'g': if (general_operand (op, VOIDmode)) @@ -1853,20 +1813,16 @@ asm_operand_ok (op, constraint) result = 1; } #ifdef EXTRA_CONSTRAINT_STR - if (EXTRA_CONSTRAINT_STR (op, c, constraint)) + else if (EXTRA_CONSTRAINT_STR (op, c, constraint)) + result = 1; + else if (EXTRA_MEMORY_CONSTRAINT (c, constraint) + /* Every memory operand can be reloaded to fit. */ + && memory_operand (op, VOIDmode)) + result = 1; + else if (EXTRA_ADDRESS_CONSTRAINT (c, constraint) + /* Every address operand can be reloaded to fit. */ + && address_operand (op, VOIDmode)) result = 1; - if (EXTRA_MEMORY_CONSTRAINT (c, constraint)) - { - /* Every memory operand can be reloaded to fit. */ - if (memory_operand (op, VOIDmode)) - result = 1; - } - if (EXTRA_ADDRESS_CONSTRAINT (c, constraint)) - { - /* Every address operand can be reloaded to fit. */ - if (address_operand (op, VOIDmode)) - result = 1; - } #endif break; } @@ -1886,8 +1842,7 @@ asm_operand_ok (op, constraint) Otherwise, return a null pointer. */ rtx * -find_constant_term_loc (p) - rtx *p; +find_constant_term_loc (rtx *p) { rtx *tem; enum rtx_code code = GET_CODE (*p); @@ -1940,10 +1895,9 @@ find_constant_term_loc (p) don't use it before reload. */ int -offsettable_memref_p (op) - rtx op; +offsettable_memref_p (rtx op) { - return ((GET_CODE (op) == MEM) + return ((MEM_P (op)) && offsettable_address_p (1, GET_MODE (op), XEXP (op, 0))); } @@ -1951,10 +1905,9 @@ offsettable_memref_p (op) consider pseudo-regs valid as index or base regs. */ int -offsettable_nonstrict_memref_p (op) - rtx op; +offsettable_nonstrict_memref_p (rtx op) { - return ((GET_CODE (op) == MEM) + return ((MEM_P (op)) && offsettable_address_p (0, GET_MODE (op), XEXP (op, 0))); } @@ -1969,16 +1922,13 @@ offsettable_nonstrict_memref_p (op) for the sake of use in reload.c. */ int -offsettable_address_p (strictp, mode, y) - int strictp; - enum machine_mode mode; - rtx y; +offsettable_address_p (int strictp, enum machine_mode mode, rtx y) { enum rtx_code ycode = GET_CODE (y); rtx z; rtx y1 = y; rtx *y2; - int (*addressp) PARAMS ((enum machine_mode, rtx)) = + int (*addressp) (enum machine_mode, rtx) = (strictp ? strict_memory_address_p : memory_address_p); unsigned int mode_sz = GET_MODE_SIZE (mode); @@ -2016,7 +1966,7 @@ offsettable_address_p (strictp, mode, y) return good; } - if (GET_RTX_CLASS (ycode) == 'a') + if (GET_RTX_CLASS (ycode) == RTX_AUTOINC) return 0; /* The offset added here is chosen as the maximum offset that @@ -2044,68 +1994,51 @@ offsettable_address_p (strictp, mode, y) because the amount of the increment depends on the mode. */ int -mode_dependent_address_p (addr) - rtx addr ATTRIBUTE_UNUSED; /* Maybe used in GO_IF_MODE_DEPENDENT_ADDRESS. */ -{ +mode_dependent_address_p (rtx addr) +{ + /* Auto-increment addressing with anything other than post_modify + or pre_modify always introduces a mode dependency. Catch such + cases now instead of deferring to the target. */ + if (GET_CODE (addr) == PRE_INC + || GET_CODE (addr) == POST_INC + || GET_CODE (addr) == PRE_DEC + || GET_CODE (addr) == POST_DEC) + return 1; + GO_IF_MODE_DEPENDENT_ADDRESS (addr, win); return 0; /* Label `win' might (not) be used via GO_IF_MODE_DEPENDENT_ADDRESS. */ win: ATTRIBUTE_UNUSED_LABEL return 1; } - -/* Return 1 if OP is a general operand - other than a memory ref with a mode dependent address. 
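The new early return in mode_dependent_address_p catches plain auto-increment addresses up front, since the increment amount equals GET_MODE_SIZE of the access and therefore varies with the mode. For example (hypothetical rtx, any target):

    /* (post_inc:P sp) is mode dependent: accessing it in QImode
       bumps sp by 1, in SImode by 4.  */
    rtx addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
    /* mode_dependent_address_p (addr) now returns 1 without ever
       consulting GO_IF_MODE_DEPENDENT_ADDRESS.  */
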
*/ - -int -mode_independent_operand (op, mode) - enum machine_mode mode; - rtx op; -{ - rtx addr; - - if (! general_operand (op, mode)) - return 0; - - if (GET_CODE (op) != MEM) - return 1; - - addr = XEXP (op, 0); - GO_IF_MODE_DEPENDENT_ADDRESS (addr, lose); - return 1; - /* Label `lose' might (not) be used via GO_IF_MODE_DEPENDENT_ADDRESS. */ - lose: ATTRIBUTE_UNUSED_LABEL - return 0; -} /* Like extract_insn, but save insn extracted and don't extract again, when called again for the same insn expecting that recog_data still contain the valid information. This is used primary by gen_attr infrastructure that often does extract insn again and again. */ void -extract_insn_cached (insn) - rtx insn; +extract_insn_cached (rtx insn) { if (recog_data.insn == insn && INSN_CODE (insn) >= 0) return; extract_insn (insn); recog_data.insn = insn; } -/* Do cached extract_insn, constrain_operand and complain about failures. + +/* Do cached extract_insn, constrain_operands and complain about failures. Used by insn_attrtab. */ void -extract_constrain_insn_cached (insn) - rtx insn; +extract_constrain_insn_cached (rtx insn) { extract_insn_cached (insn); if (which_alternative == -1 && !constrain_operands (reload_completed)) fatal_insn_not_found (insn); } -/* Do cached constrain_operand and complain about failures. */ + +/* Do cached constrain_operands and complain about failures. */ int -constrain_operands_cached (strict) - int strict; +constrain_operands_cached (int strict) { if (which_alternative == -1) return constrain_operands (strict); @@ -2116,8 +2049,7 @@ constrain_operands_cached (strict) /* Analyze INSN and fill in recog_data. */ void -extract_insn (insn) - rtx insn; +extract_insn (rtx insn) { int i; int icode; @@ -2159,14 +2091,13 @@ extract_insn (insn) /* This insn is an `asm' with operands. */ /* expand_asm_operands makes sure there aren't too many operands. */ - if (noperands > MAX_RECOG_OPERANDS) - abort (); + gcc_assert (noperands <= MAX_RECOG_OPERANDS); /* Now get the operand values and constraints out of the insn. */ decode_asm_operands (body, recog_data.operand, recog_data.operand_loc, recog_data.constraints, - recog_data.operand_mode); + recog_data.operand_mode, NULL); if (noperands > 0) { const char *p = recog_data.constraints[0]; @@ -2208,19 +2139,21 @@ extract_insn (insn) : recog_data.constraints[i][0] == '+' ? OP_INOUT : OP_IN); - if (recog_data.n_alternatives > MAX_RECOG_ALTERNATIVES) - abort (); + gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES); } /* After calling extract_insn, you can use this function to extract some information from the constraint strings into a more usable form. The collected data is stored in recog_op_alt. 
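The cached extract/constrain pair above exists for the benefit of the generated attribute code, which queries the same insn repeatedly. A sketch of the usual query (the function name is hypothetical):

    /* Which constraint alternative did INSN match?  Extraction and
       constraining each happen at most once per insn.  */
    static int
    matched_alternative (rtx insn)
    {
      extract_constrain_insn_cached (insn);
      return which_alternative;
    }
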
*/ void -preprocess_constraints () +preprocess_constraints (void) { int i; - memset (recog_op_alt, 0, sizeof recog_op_alt); + for (i = 0; i < recog_data.n_operands; i++) + memset (recog_op_alt[i], 0, (recog_data.n_alternatives + * sizeof (struct operand_alternative))); + for (i = 0; i < recog_data.n_operands; i++) { int j; @@ -2231,7 +2164,7 @@ preprocess_constraints () for (j = 0; j < recog_data.n_alternatives; j++) { - op_alt[j].class = NO_REGS; + op_alt[j].cl = NO_REGS; op_alt[j].constraint = p; op_alt[j].matches = -1; op_alt[j].matched = -1; @@ -2306,12 +2239,14 @@ preprocess_constraints () case 'p': op_alt[j].is_address = 1; - op_alt[j].class = reg_class_subunion[(int) op_alt[j].class] - [(int) MODE_BASE_REG_CLASS (VOIDmode)]; + op_alt[j].cl = reg_class_subunion[(int) op_alt[j].cl] + [(int) base_reg_class (VOIDmode, ADDRESS, SCRATCH)]; break; - case 'g': case 'r': - op_alt[j].class = reg_class_subunion[(int) op_alt[j].class][(int) GENERAL_REGS]; + case 'g': + case 'r': + op_alt[j].cl = + reg_class_subunion[(int) op_alt[j].cl][(int) GENERAL_REGS]; break; default: @@ -2323,16 +2258,17 @@ preprocess_constraints () if (EXTRA_ADDRESS_CONSTRAINT (c, p)) { op_alt[j].is_address = 1; - op_alt[j].class + op_alt[j].cl = (reg_class_subunion - [(int) op_alt[j].class] - [(int) MODE_BASE_REG_CLASS (VOIDmode)]); + [(int) op_alt[j].cl] + [(int) base_reg_class (VOIDmode, ADDRESS, + SCRATCH)]); break; } - op_alt[j].class + op_alt[j].cl = (reg_class_subunion - [(int) op_alt[j].class] + [(int) op_alt[j].cl] [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]); break; } @@ -2375,8 +2311,7 @@ struct funny_match }; int -constrain_operands (strict) - int strict; +constrain_operands (int strict) { const char *constraints[MAX_RECOG_OPERANDS]; int matching_operands[MAX_RECOG_OPERANDS]; @@ -2398,6 +2333,7 @@ constrain_operands (strict) do { + int seen_earlyclobber_at = -1; int opno; int lose = 0; funny_match_index = 0; @@ -2416,12 +2352,12 @@ constrain_operands (strict) /* A unary operator may be accepted by the predicate, but it is irrelevant for matching constraints. */ - if (GET_RTX_CLASS (GET_CODE (op)) == '1') + if (UNARY_P (op)) op = XEXP (op, 0); if (GET_CODE (op) == SUBREG) { - if (GET_CODE (SUBREG_REG (op)) == REG + if (REG_P (SUBREG_REG (op)) && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER) offset = subreg_regno_offset (REGNO (SUBREG_REG (op)), GET_MODE (SUBREG_REG (op)), @@ -2460,6 +2396,8 @@ constrain_operands (strict) case '&': earlyclobber[opno] = 1; + if (seen_earlyclobber_at < 0) + seen_earlyclobber_at = opno; break; case '0': case '1': case '2': case '3': case '4': @@ -2489,9 +2427,9 @@ constrain_operands (strict) /* A unary operator may be accepted by the predicate, but it is irrelevant for matching constraints. */ - if (GET_RTX_CLASS (GET_CODE (op1)) == '1') + if (UNARY_P (op1)) op1 = XEXP (op1, 0); - if (GET_RTX_CLASS (GET_CODE (op2)) == '1') + if (UNARY_P (op2)) op2 = XEXP (op2, 0); val = operands_match_p (op1, op2); @@ -2527,16 +2465,22 @@ constrain_operands (strict) break; /* No need to check general_operand again; - it was done in insn-recog.c. */ + it was done in insn-recog.c. Well, except that reload + doesn't check the validity of its replacements, but + that should only matter when there's a bug. */ case 'g': /* Anything goes unless it is a REG and really has a hard reg but the hard reg is not in the class GENERAL_REGS. 
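After extract_insn and preprocess_constraints, passes read the digested constraint data from recog_op_alt rather than re-parsing the strings. A sketch of a typical lookup (the helper is hypothetical):

    /* Register class required of operand OPNO in alternative ALT,
       following a matching-operand constraint if there is one.  */
    static enum reg_class
    operand_class (int opno, int alt)
    {
      const struct operand_alternative *op_alt = &recog_op_alt[opno][alt];
      if (op_alt->matches >= 0)
        return recog_op_alt[op_alt->matches][alt].cl;
      return op_alt->cl;
    }
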
*/ - if (strict < 0 - || GENERAL_REGS == ALL_REGS - || GET_CODE (op) != REG - || (reload_in_progress - && REGNO (op) >= FIRST_PSEUDO_REGISTER) - || reg_fits_class_p (op, GENERAL_REGS, offset, mode)) + if (REG_P (op)) + { + if (strict < 0 + || GENERAL_REGS == ALL_REGS + || (reload_in_progress + && REGNO (op) >= FIRST_PSEUDO_REGISTER) + || reg_fits_class_p (op, GENERAL_REGS, offset, mode)) + win = 1; + } + else if (strict < 0 || general_operand (op, mode)) win = 1; break; @@ -2548,24 +2492,37 @@ constrain_operands (strict) break; case 'm': - if (GET_CODE (op) == MEM - /* Before reload, accept what reload can turn into mem. */ - || (strict < 0 && CONSTANT_P (op)) - /* During reload, accept a pseudo */ - || (reload_in_progress && GET_CODE (op) == REG - && REGNO (op) >= FIRST_PSEUDO_REGISTER)) + /* Memory operands must be valid, to the extent + required by STRICT. */ + if (MEM_P (op)) + { + if (strict > 0 + && !strict_memory_address_p (GET_MODE (op), + XEXP (op, 0))) + break; + if (strict == 0 + && !memory_address_p (GET_MODE (op), XEXP (op, 0))) + break; + win = 1; + } + /* Before reload, accept what reload can turn into mem. */ + else if (strict < 0 && CONSTANT_P (op)) + win = 1; + /* During reload, accept a pseudo */ + else if (reload_in_progress && REG_P (op) + && REGNO (op) >= FIRST_PSEUDO_REGISTER) win = 1; break; case '<': - if (GET_CODE (op) == MEM + if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_DEC || GET_CODE (XEXP (op, 0)) == POST_DEC)) win = 1; break; case '>': - if (GET_CODE (op) == MEM + if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_INC || GET_CODE (XEXP (op, 0)) == POST_INC)) win = 1; @@ -2617,12 +2574,12 @@ constrain_operands (strict) break; case 'V': - if (GET_CODE (op) == MEM + if (MEM_P (op) && ((strict > 0 && ! offsettable_memref_p (op)) || (strict < 0 - && !(CONSTANT_P (op) || GET_CODE (op) == MEM)) + && !(CONSTANT_P (op) || MEM_P (op))) || (reload_in_progress - && !(GET_CODE (op) == REG + && !(REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)))) win = 1; break; @@ -2632,55 +2589,48 @@ constrain_operands (strict) || (strict == 0 && offsettable_nonstrict_memref_p (op)) /* Before reload, accept what reload can handle. */ || (strict < 0 - && (CONSTANT_P (op) || GET_CODE (op) == MEM)) + && (CONSTANT_P (op) || MEM_P (op))) /* During reload, accept a pseudo */ - || (reload_in_progress && GET_CODE (op) == REG + || (reload_in_progress && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) win = 1; break; default: { - enum reg_class class; + enum reg_class cl; - class = (c == 'r' + cl = (c == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (c, p)); - if (class != NO_REGS) + if (cl != NO_REGS) { if (strict < 0 || (strict == 0 - && GET_CODE (op) == REG + && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) || (strict == 0 && GET_CODE (op) == SCRATCH) - || (GET_CODE (op) == REG - && reg_fits_class_p (op, class, offset, mode))) + || (REG_P (op) + && reg_fits_class_p (op, cl, offset, mode))) win = 1; } #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_CONSTRAINT_STR (op, c, p)) win = 1; - if (EXTRA_MEMORY_CONSTRAINT (c, p)) - { - /* Every memory operand can be reloaded to fit. */ - if (strict < 0 && GET_CODE (op) == MEM) - win = 1; - - /* Before reload, accept what reload can turn into mem. */ - if (strict < 0 && CONSTANT_P (op)) - win = 1; - - /* During reload, accept a pseudo */ - if (reload_in_progress && GET_CODE (op) == REG - && REGNO (op) >= FIRST_PSEUDO_REGISTER) - win = 1; - } - if (EXTRA_ADDRESS_CONSTRAINT (c, p)) - { - /* Every address operand can be reloaded to fit. 
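constrain_operands' strict argument selects among three regimes, visible throughout the cases above: strict < 0 before reload (accept anything reload could fix up), strict == 0 for non-strict checking (pseudos allowed), and strict > 0 after reload (hard regs and strictly valid addresses only). A sketch of the conventional call site:

    /* Re-check INSN against its constraints for the current phase.  */
    static int
    insn_constraints_ok (rtx insn)
    {
      extract_insn (insn);
      return constrain_operands (reload_completed);
    }
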
*/ - if (strict < 0) - win = 1; - } + else if (EXTRA_MEMORY_CONSTRAINT (c, p) + /* Every memory operand can be reloaded to fit. */ + && ((strict < 0 && MEM_P (op)) + /* Before reload, accept what reload can turn + into mem. */ + || (strict < 0 && CONSTANT_P (op)) + /* During reload, accept a pseudo */ + || (reload_in_progress && REG_P (op) + && REGNO (op) >= FIRST_PSEUDO_REGISTER))) + win = 1; + else if (EXTRA_ADDRESS_CONSTRAINT (c, p) + /* Every address operand can be reloaded to fit. */ + && strict < 0) + win = 1; #endif break; } @@ -2702,15 +2652,17 @@ constrain_operands (strict) /* See if any earlyclobber operand conflicts with some other operand. */ - if (strict > 0) - for (eopno = 0; eopno < recog_data.n_operands; eopno++) + if (strict > 0 && seen_earlyclobber_at >= 0) + for (eopno = seen_earlyclobber_at; + eopno < recog_data.n_operands; + eopno++) /* Ignore earlyclobber operands now in memory, because we would often report failure when we have two memory operands, one of which was formerly a REG. */ if (earlyclobber[eopno] - && GET_CODE (recog_data.operand[eopno]) == REG) + && REG_P (recog_data.operand[eopno])) for (opno = 0; opno < recog_data.n_operands; opno++) - if ((GET_CODE (recog_data.operand[opno]) == MEM + if ((MEM_P (recog_data.operand[opno]) || recog_data.operand_type[opno] != OP_OUT) && opno != eopno /* Ignore things like match_operator operands. */ @@ -2753,139 +2705,137 @@ constrain_operands (strict) If REG occupies multiple hard regs, all of them must be in CLASS. */ int -reg_fits_class_p (operand, class, offset, mode) - rtx operand; - enum reg_class class; - int offset; - enum machine_mode mode; +reg_fits_class_p (rtx operand, enum reg_class cl, int offset, + enum machine_mode mode) { int regno = REGNO (operand); - if (regno < FIRST_PSEUDO_REGISTER - && TEST_HARD_REG_BIT (reg_class_contents[(int) class], - regno + offset)) - { - int sr; - regno += offset; - for (sr = HARD_REGNO_NREGS (regno, mode) - 1; - sr > 0; sr--) - if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], - regno + sr)) - break; - return sr == 0; - } - return 0; + if (cl == NO_REGS) + return 0; + + return (regno < FIRST_PSEUDO_REGISTER + && in_hard_reg_set_p (reg_class_contents[(int) cl], + mode, regno + offset)); } -/* Split single instruction. Helper function for split_all_insns. - Return last insn in the sequence if successful, or NULL if unsuccessful. */ +/* Split single instruction. Helper function for split_all_insns and + split_all_insns_noflow. Return last insn in the sequence if successful, + or NULL if unsuccessful. */ + static rtx -split_insn (insn) - rtx insn; +split_insn (rtx insn) { - rtx set; - if (!INSN_P (insn)) - ; - /* Don't split no-op move insns. These should silently - disappear later in final. Splitting such insns would - break the code that handles REG_NO_CONFLICT blocks. */ + /* Split insns here to get max fine-grain parallelism. */ + rtx first = PREV_INSN (insn); + rtx last = try_split (PATTERN (insn), insn, 1); - else if ((set = single_set (insn)) != NULL && set_noop_p (set)) - { - /* Nops get in the way while scheduling, so delete them - now if register allocation has already been done. It - is too risky to try to do this before register - allocation, and there are unlikely to be very many - nops then anyways. */ - if (reload_completed) - delete_insn_and_edges (insn); - } - else - { - /* Split insns here to get max fine-grain parallelism. 
*/ - rtx first = PREV_INSN (insn); - rtx last = try_split (PATTERN (insn), insn, 1); + if (last == insn) + return NULL_RTX; - if (last != insn) + /* try_split returns the NOTE that INSN became. */ + SET_INSN_DELETED (insn); + + /* ??? Coddle to md files that generate subregs in post-reload + splitters instead of computing the proper hard register. */ + if (reload_completed && first != last) + { + first = NEXT_INSN (first); + for (;;) { - /* try_split returns the NOTE that INSN became. */ - PUT_CODE (insn, NOTE); - NOTE_SOURCE_FILE (insn) = 0; - NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED; - - /* ??? Coddle to md files that generate subregs in post- - reload splitters instead of computing the proper - hard register. */ - if (reload_completed && first != last) - { - first = NEXT_INSN (first); - while (1) - { - if (INSN_P (first)) - cleanup_subreg_operands (first); - if (first == last) - break; - first = NEXT_INSN (first); - } - } - return last; + if (INSN_P (first)) + cleanup_subreg_operands (first); + if (first == last) + break; + first = NEXT_INSN (first); } } - return NULL_RTX; + return last; } + /* Split all insns in the function. If UPD_LIFE, update life info after. */ void -split_all_insns (upd_life) - int upd_life; +split_all_insns (int upd_life) { sbitmap blocks; - int changed; + bool changed; basic_block bb; blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); - changed = 0; + changed = false; FOR_EACH_BB_REVERSE (bb) { rtx insn, next; bool finish = false; - for (insn = bb->head; !finish ; insn = next) + for (insn = BB_HEAD (bb); !finish ; insn = next) { - rtx last; - /* Can't use `next_real_insn' because that might go across CODE_LABELS and short-out basic blocks. */ next = NEXT_INSN (insn); - finish = (insn == bb->end); - last = split_insn (insn); - if (last) + finish = (insn == BB_END (bb)); + if (INSN_P (insn)) { - /* The split sequence may include barrier, but the - BB boundary we are interested in will be set to previous - one. */ - - while (GET_CODE (last) == BARRIER) - last = PREV_INSN (last); - SET_BIT (blocks, bb->index); - changed = 1; - insn = last; + rtx set = single_set (insn); + + /* Don't split no-op move insns. These should silently + disappear later in final. Splitting such insns would + break the code that handles REG_NO_CONFLICT blocks. */ + if (set && set_noop_p (set)) + { + /* Nops get in the way while scheduling, so delete them + now if register allocation has already been done. It + is too risky to try to do this before register + allocation, and there are unlikely to be very many + nops then anyways. */ + if (reload_completed) + { + /* If the no-op set has a REG_UNUSED note, we need + to update liveness information. */ + if (find_reg_note (insn, REG_UNUSED, NULL_RTX)) + { + SET_BIT (blocks, bb->index); + changed = true; + } + /* ??? Is life info affected by deleting edges? */ + delete_insn_and_edges (insn); + } + } + else + { + rtx last = split_insn (insn); + if (last) + { + /* The split sequence may include barrier, but the + BB boundary we are interested in will be set to + previous one. 
*/ + + while (BARRIER_P (last)) + last = PREV_INSN (last); + SET_BIT (blocks, bb->index); + changed = true; + } + } } } } if (changed) { + int old_last_basic_block = last_basic_block; + find_many_sub_basic_blocks (blocks); + + if (old_last_basic_block != last_basic_block && upd_life) + blocks = sbitmap_resize (blocks, last_basic_block, 1); } if (changed && upd_life) - { - count_or_remove_death_notes (blocks, 1); - update_life_info (blocks, UPDATE_LIFE_LOCAL, PROP_DEATH_NOTES); - } + update_life_info (blocks, UPDATE_LIFE_GLOBAL_RM_NOTES, + PROP_DEATH_NOTES); + #ifdef ENABLE_CHECKING verify_flow_info (); #endif @@ -2896,17 +2846,37 @@ split_all_insns (upd_life) /* Same as split_all_insns, but do not expect CFG to be available. Used by machine dependent reorg passes. */ -void -split_all_insns_noflow () +unsigned int +split_all_insns_noflow (void) { rtx next, insn; for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); - split_insn (insn); + if (INSN_P (insn)) + { + /* Don't split no-op move insns. These should silently + disappear later in final. Splitting such insns would + break the code that handles REG_NO_CONFLICT blocks. */ + rtx set = single_set (insn); + if (set && set_noop_p (set)) + { + /* Nops get in the way while scheduling, so delete them + now if register allocation has already been done. It + is too risky to try to do this before register + allocation, and there are unlikely to be very many + nops then anyways. + + ??? Should we use delete_insn when the CFG isn't valid? */ + if (reload_completed) + delete_insn_and_edges (insn); + } + else + split_insn (insn); + } } - return; + return 0; } #ifdef HAVE_peephole2 @@ -2918,6 +2888,8 @@ struct peep2_insn_data static struct peep2_insn_data peep2_insn_data[MAX_INSNS_PER_PEEP2 + 1]; static int peep2_current; +/* The number of instructions available to match a peep2. */ +int peep2_current_count; /* A non-insn marker indicating the last insn of the block. The live_before regset for this element is correct, indicating @@ -2929,18 +2901,14 @@ static int peep2_current; in a multi-insn pattern. */ rtx -peep2_next_insn (n) - int n; +peep2_next_insn (int n) { - if (n >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (n <= peep2_current_count); n += peep2_current; if (n >= MAX_INSNS_PER_PEEP2 + 1) n -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[n].insn == PEEP2_EOB) - return NULL_RTX; return peep2_insn_data[n].insn; } @@ -2948,19 +2916,15 @@ peep2_next_insn (n) after `current'. */ int -peep2_regno_dead_p (ofs, regno) - int ofs; - int regno; +peep2_regno_dead_p (int ofs, int regno) { - if (ofs >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[ofs].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX); return ! REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno); } @@ -2968,24 +2932,20 @@ peep2_regno_dead_p (ofs, regno) /* Similarly for a REG. 
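The peep2_* accessors above all share the same ring-buffer arithmetic: the window occupies peep2_insn_data[0 .. MAX_INSNS_PER_PEEP2], and an offset from peep2_current wraps modulo the buffer size. Factored out as a sketch:

    /* Map an offset N within the peephole window to a buffer slot.  */
    static int
    peep2_buf_position_sketch (int n)
    {
      n += peep2_current;
      if (n >= MAX_INSNS_PER_PEEP2 + 1)
        n -= MAX_INSNS_PER_PEEP2 + 1;
      return n;
    }
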
*/ int -peep2_reg_dead_p (ofs, reg) - int ofs; - rtx reg; +peep2_reg_dead_p (int ofs, rtx reg) { int regno, n; - if (ofs >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[ofs].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX); regno = REGNO (reg); - n = HARD_REGNO_NREGS (regno, GET_MODE (reg)); + n = hard_regno_nregs[regno][GET_MODE (reg)]; while (--n >= 0) if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno + n)) return 0; @@ -3004,19 +2964,16 @@ peep2_reg_dead_p (ofs, reg) returned. */ rtx -peep2_find_free_register (from, to, class_str, mode, reg_set) - int from, to; - const char *class_str; - enum machine_mode mode; - HARD_REG_SET *reg_set; +peep2_find_free_register (int from, int to, const char *class_str, + enum machine_mode mode, HARD_REG_SET *reg_set) { static int search_ofs; - enum reg_class class; + enum reg_class cl; HARD_REG_SET live; int i; - if (from >= MAX_INSNS_PER_PEEP2 + 1 || to >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (from < MAX_INSNS_PER_PEEP2 + 1); + gcc_assert (to < MAX_INSNS_PER_PEEP2 + 1); from += peep2_current; if (from >= MAX_INSNS_PER_PEEP2 + 1) @@ -3025,8 +2982,7 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) if (to >= MAX_INSNS_PER_PEEP2 + 1) to -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[from].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[from].insn != NULL_RTX); REG_SET_TO_HARD_REG_SET (live, peep2_insn_data[from].live_before); while (from != to) @@ -3035,13 +2991,12 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) if (++from >= MAX_INSNS_PER_PEEP2 + 1) from = 0; - if (peep2_insn_data[from].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[from].insn != NULL_RTX); REG_SET_TO_HARD_REG_SET (this_live, peep2_insn_data[from].live_before); IOR_HARD_REG_SET (live, this_live); } - class = (class_str[0] == 'r' ? GENERAL_REGS + cl = (class_str[0] == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (class_str[0], class_str)); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) @@ -3062,7 +3017,7 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) if (fixed_regs[regno]) continue; /* Make sure the register is of the right class. */ - if (! TEST_HARD_REG_BIT (reg_class_contents[class], regno)) + if (! TEST_HARD_REG_BIT (reg_class_contents[cl], regno)) continue; /* And can support the mode we need. */ if (! HARD_REGNO_MODE_OK (regno, mode)) @@ -3076,7 +3031,7 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) continue; success = 1; - for (j = HARD_REGNO_NREGS (regno, mode) - 1; j >= 0; j--) + for (j = hard_regno_nregs[regno][mode] - 1; j >= 0; j--) { if (TEST_HARD_REG_BIT (*reg_set, regno + j) || TEST_HARD_REG_BIT (live, regno + j)) @@ -3087,8 +3042,7 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) } if (success) { - for (j = HARD_REGNO_NREGS (regno, mode) - 1; j >= 0; j--) - SET_HARD_REG_BIT (*reg_set, regno + j); + add_to_hard_reg_set (reg_set, mode, regno); /* Start the next search with the next register. */ if (++raw_regno >= FIRST_PSEUDO_REGISTER) @@ -3105,11 +3059,9 @@ peep2_find_free_register (from, to, class_str, mode, reg_set) /* Perform the peephole2 optimization pass. 
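peep2_find_free_register above is what the code generated from a define_peephole2 calls to obtain scratch registers. A sketch of that generated-code idiom (SImode and the "r" class are arbitrary; FAIL is the md-generated-code macro that abandons the match):

    HARD_REG_SET used;
    rtx scratch;

    CLEAR_HARD_REG_SET (used);
    /* A register of class 'r', live nowhere across insns 0 and 1 of
       the window and not already handed out via USED.  */
    scratch = peep2_find_free_register (0, 1, "r", SImode, &used);
    if (scratch == NULL_RTX)
      FAIL;
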
@@ -3105,11 +3059,9 @@
 
 /* Perform the peephole2 optimization pass.  */
 
-void
-peephole2_optimize (dump_file)
-     FILE *dump_file ATTRIBUTE_UNUSED;
+static void
+peephole2_optimize (void)
 {
-  regset_head rs_heads[MAX_INSNS_PER_PEEP2 + 2];
   rtx insn, prev;
   regset live;
   int i;
@@ -3119,12 +3071,13 @@
   bool changed;
 #endif
   bool do_cleanup_cfg = false;
+  bool do_global_life_update = false;
   bool do_rebuild_jump_labels = false;
 
   /* Initialize the regsets we're going to use.  */
   for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i)
-    peep2_insn_data[i].live_before = INITIALIZE_REG_SET (rs_heads[i]);
-  live = INITIALIZE_REG_SET (rs_heads[i]);
+    peep2_insn_data[i].live_before = ALLOC_REG_SET (&reg_obstack);
+  live = ALLOC_REG_SET (&reg_obstack);
 
 #ifdef HAVE_conditional_execution
   blocks = sbitmap_alloc (last_basic_block);
@@ -3137,17 +3090,20 @@
   FOR_EACH_BB_REVERSE (bb)
     {
       struct propagate_block_info *pbi;
+      reg_set_iterator rsi;
+      unsigned int j;
 
       /* Indicate that all slots except the last hold invalid data.  */
       for (i = 0; i < MAX_INSNS_PER_PEEP2; ++i)
	 peep2_insn_data[i].insn = NULL_RTX;
+      peep2_current_count = 0;
 
       /* Indicate that the last slot contains live_after data.  */
       peep2_insn_data[MAX_INSNS_PER_PEEP2].insn = PEEP2_EOB;
       peep2_current = MAX_INSNS_PER_PEEP2;
 
       /* Start up propagation.  */
-      COPY_REG_SET (live, bb->global_live_at_end);
+      COPY_REG_SET (live, bb->il.rtl->global_live_at_end);
       COPY_REG_SET (peep2_insn_data[MAX_INSNS_PER_PEEP2].live_before, live);
 
 #ifdef HAVE_conditional_execution
@@ -3156,7 +3112,7 @@
       pbi = init_propagate_block_info (bb, live, NULL, NULL, PROP_DEATH_NOTES);
 #endif
 
-      for (insn = bb->end; ; insn = prev)
+      for (insn = BB_END (bb); ; insn = prev)
	 {
	   prev = PREV_INSN (insn);
	   if (INSN_P (insn))
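Editor's note (not part of the patch): the loop just opened walks each block backward while `live` holds the registers live after the insn at hand; propagate_one_insn then removes the insn's definitions and adds its uses, and the result becomes that slot's live_before.  A toy model of the invariant; struct toy_insn and both masks are hypothetical:

#include <stddef.h>

struct toy_insn { unsigned defs, uses; };

/* Record, for each insn of a block, the registers live just before
   it -- the invariant peep2_insn_data[].live_before maintains.  */
static void
backward_liveness (const struct toy_insn *insn, size_t n,
                   unsigned live_at_end, unsigned *live_before)
{
  unsigned live = live_at_end;
  size_t i;

  for (i = n; i-- > 0; )
    {
      live &= ~insn[i].defs;     /* a definition kills its register */
      live |= insn[i].uses;      /* a use is live above the insn */
      live_before[i] = live;
    }
}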
@@ -3169,12 +3125,25 @@
	       /* Record this insn.  */
	       if (--peep2_current < 0)
		 peep2_current = MAX_INSNS_PER_PEEP2;
+	      if (peep2_current_count < MAX_INSNS_PER_PEEP2
+		  && peep2_insn_data[peep2_current].insn == NULL_RTX)
+		peep2_current_count++;
	       peep2_insn_data[peep2_current].insn = insn;
	       propagate_one_insn (pbi, insn);
	       COPY_REG_SET (peep2_insn_data[peep2_current].live_before, live);
 
-	      /* Match the peephole.  */
-	      try = peephole2_insns (PATTERN (insn), insn, &match_len);
+	      if (RTX_FRAME_RELATED_P (insn))
+		{
+		  /* If an insn has RTX_FRAME_RELATED_P set, peephole
+		     substitution would lose the
+		     REG_FRAME_RELATED_EXPR that is attached.  */
+		  peep2_current_count = 0;
+		  try = NULL;
+		}
+	      else
+		/* Match the peephole.  */
+		try = peephole2_insns (PATTERN (insn), insn, &match_len);
+
	       if (try != NULL)
		 {
		   /* If we are splitting a CALL_INSN, look for the CALL_INSN
@@ -3189,20 +3158,19 @@
		       if (j >= MAX_INSNS_PER_PEEP2 + 1)
			 j -= MAX_INSNS_PER_PEEP2 + 1;
		       old_insn = peep2_insn_data[j].insn;
-		      if (GET_CODE (old_insn) != CALL_INSN)
+		      if (!CALL_P (old_insn))
			 continue;
		       was_call = true;
 
		       new_insn = try;
		       while (new_insn != NULL_RTX)
			 {
-			  if (GET_CODE (new_insn) == CALL_INSN)
+			  if (CALL_P (new_insn))
			     break;
			   new_insn = NEXT_INSN (new_insn);
			 }
 
-		      if (new_insn == NULL_RTX)
-			abort ();
+		      gcc_assert (new_insn != NULL_RTX);
 
		       CALL_INSN_FUNCTION_USAGE (new_insn)
			 = CALL_INSN_FUNCTION_USAGE (old_insn);
@@ -3214,7 +3182,6 @@
			   {
			   case REG_NORETURN:
			   case REG_SETJMP:
-			  case REG_ALWAYS_RETURN:
			     REG_NOTES (new_insn)
			       = gen_rtx_EXPR_LIST (REG_NOTE_KIND (note),
						    XEXP (note, 0),
@@ -3231,8 +3198,7 @@
			   if (j >= MAX_INSNS_PER_PEEP2 + 1)
			     j -= MAX_INSNS_PER_PEEP2 + 1;
			   old_insn = peep2_insn_data[j].insn;
-			  if (GET_CODE (old_insn) == CALL_INSN)
-			    abort ();
+			  gcc_assert (!CALL_P (old_insn));
			 }
		       break;
		     }
@@ -3245,8 +3211,8 @@
						     REG_EH_REGION, NULL_RTX);
 
		   /* Replace the old sequence with the new.  */
-		  try = emit_insn_after_scope (try, peep2_insn_data[i].insn,
-					       INSN_SCOPE (peep2_insn_data[i].insn));
+		  try = emit_insn_after_setloc (try, peep2_insn_data[i].insn,
+						INSN_LOCATOR (peep2_insn_data[i].insn));
		   before_try = PREV_INSN (insn);
		   delete_insn_chain (insn, peep2_insn_data[i].insn);
@@ -3254,14 +3220,14 @@
		   if (note || (was_call && nonlocal_goto_handler_labels))
		     {
		       edge eh_edge;
+		      edge_iterator ei;
 
-		      for (eh_edge = bb->succ; eh_edge
-			   ; eh_edge = eh_edge->succ_next)
+		      FOR_EACH_EDGE (eh_edge, ei, bb->succs)
			 if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
			   break;
 
		       for (x = try ; x != before_try ; x = PREV_INSN (x))
-			if (GET_CODE (x) == CALL_INSN
+			if (CALL_P (x)
			     || (flag_non_call_exceptions
				 && may_trap_p (PATTERN (x))
				 && !find_reg_note (x, REG_EH_REGION, NULL)))
@@ -3272,7 +3238,7 @@
						  XEXP (note, 0),
						  REG_NOTES (x));
 
-			    if (x != bb->end && eh_edge)
+			    if (x != BB_END (bb) && eh_edge)
			       {
				 edge nfte, nehe;
				 int flags;
@@ -3280,7 +3246,7 @@
				 nfte = split_block (bb, x);
				 flags = (eh_edge->flags
					  & (EDGE_EH | EDGE_ABNORMAL));
-				if (GET_CODE (x) == CALL_INSN)
+				if (CALL_P (x))
				   flags |= EDGE_ABNORMAL_CALL;
				 nehe = make_edge (nfte->src, eh_edge->dest,
						   flags);
@@ -3316,6 +3282,7 @@
		       for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i)
			 peep2_insn_data[i].insn = NULL_RTX;
		       peep2_insn_data[peep2_current].insn = PEEP2_EOB;
+		      peep2_current_count = 0;
 #else
		       /* Back up lifetime information past the end of the
			  newly created sequence.  */
@@ -3331,6 +3298,9 @@
			 {
			   if (--i < 0)
			     i = MAX_INSNS_PER_PEEP2;
+			  if (peep2_current_count < MAX_INSNS_PER_PEEP2
+			      && peep2_insn_data[i].insn == NULL_RTX)
+			    peep2_current_count++;
			   peep2_insn_data[i].insn = x;
			   propagate_one_insn (pbi, x);
			   COPY_REG_SET (peep2_insn_data[i].live_before, live);
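Editor's note (not part of the patch): the REG_NORETURN/REG_SETJMP switch above carries selected note kinds over from the replaced call to its replacement, prepending each with gen_rtx_EXPR_LIST.  The same filter-and-prepend shape over a plain cons list; struct note and copy_selected_notes are hypothetical:

#include <stdlib.h>

struct note { int kind; struct note *next; };

/* Copy only the notes matching one of two whitelisted kinds,
   prepending each copy to the result list.  */
static struct note *
copy_selected_notes (const struct note *old, int kind1, int kind2)
{
  struct note *copy = NULL;

  for (; old != NULL; old = old->next)
    if (old->kind == kind1 || old->kind == kind2)
      {
        struct note *n = malloc (sizeof *n);
        if (n == NULL)
          break;                 /* sketch only: stop on failure */
        n->kind = old->kind;
        n->next = copy;          /* prepend, like gen_rtx_EXPR_LIST */
        copy = n;
      }
  return copy;
}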
@@ -3348,7 +3318,7 @@
		   /* If we generated a jump instruction, it won't have
		      JUMP_LABEL set.  Recompute after we're done.  */
		   for (x = try; x != before_try; x = PREV_INSN (x))
-		    if (GET_CODE (x) == JUMP_INSN)
+		    if (JUMP_P (x))
		      {
			do_rebuild_jump_labels = true;
			break;
		}
	    }
 
-	  if (insn == bb->head)
+	  if (insn == BB_HEAD (bb))
	    break;
	}
 
+      /* Some peepholes can decide they don't need one or more of their
+	 inputs.  If this happens, local life update is not enough.  */
+      EXECUTE_IF_AND_COMPL_IN_BITMAP (bb->il.rtl->global_live_at_start, live,
+				      0, j, rsi)
+	{
+	  do_global_life_update = true;
+	  break;
+	}
+
       free_propagate_block_info (pbi);
     }
 
@@ -3376,8 +3355,10 @@
   if (do_cleanup_cfg)
     {
       cleanup_cfg (0);
-      update_life_info (0, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
+      do_global_life_update = true;
     }
+  if (do_global_life_update)
+    update_life_info (0, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
 #ifdef HAVE_conditional_execution
   else
     {
@@ -3392,51 +3373,92 @@
 /* Common predicates for use with define_bypass.  */
 
 /* True if the dependency between OUT_INSN and IN_INSN is on the store
-   data not the address operand(s) of the store.  IN_INSN must be
-   single_set.  OUT_INSN must be either a single_set or a PARALLEL with
-   SETs inside.  */
+   data, not the address operand(s) of the store.  IN_INSN and OUT_INSN
+   must be either a single_set or a PARALLEL with SETs inside.  */
 
 int
-store_data_bypass_p (out_insn, in_insn)
-     rtx out_insn, in_insn;
+store_data_bypass_p (rtx out_insn, rtx in_insn)
 {
   rtx out_set, in_set;
+  rtx out_pat, in_pat;
+  rtx out_exp, in_exp;
+  int i, j;
 
   in_set = single_set (in_insn);
-  if (! in_set)
-    abort ();
-
-  if (GET_CODE (SET_DEST (in_set)) != MEM)
-    return false;
-
-  out_set = single_set (out_insn);
-  if (out_set)
+  if (in_set)
     {
-      if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set)))
+      if (!MEM_P (SET_DEST (in_set)))
	 return false;
+
+      out_set = single_set (out_insn);
+      if (out_set)
+	{
+	  if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set)))
+	    return false;
+	}
+      else
+	{
+	  out_pat = PATTERN (out_insn);
+
+	  if (GET_CODE (out_pat) != PARALLEL)
+	    return false;
+
+	  for (i = 0; i < XVECLEN (out_pat, 0); i++)
+	    {
+	      out_exp = XVECEXP (out_pat, 0, i);
+
+	      if (GET_CODE (out_exp) == CLOBBER)
+		continue;
+
+	      gcc_assert (GET_CODE (out_exp) == SET);
+
+	      if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
+		return false;
+	    }
+	}
     }
   else
     {
-      rtx out_pat;
-      int i;
-
-      out_pat = PATTERN (out_insn);
-      if (GET_CODE (out_pat) != PARALLEL)
-	abort ();
+      in_pat = PATTERN (in_insn);
+      gcc_assert (GET_CODE (in_pat) == PARALLEL);
 
-      for (i = 0; i < XVECLEN (out_pat, 0); i++)
+      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	 {
-	  rtx exp = XVECEXP (out_pat, 0, i);
+	  in_exp = XVECEXP (in_pat, 0, i);
 
-	  if (GET_CODE (exp) == CLOBBER)
+	  if (GET_CODE (in_exp) == CLOBBER)
	     continue;
 
-	  if (GET_CODE (exp) != SET)
-	    abort ();
+	  gcc_assert (GET_CODE (in_exp) == SET);
 
-	  if (reg_mentioned_p (SET_DEST (exp), SET_DEST (in_set)))
+	  if (!MEM_P (SET_DEST (in_exp)))
	     return false;
-	}
+
+	  out_set = single_set (out_insn);
+	  if (out_set)
+	    {
+	      if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_exp)))
+		return false;
+	    }
+	  else
+	    {
+	      out_pat = PATTERN (out_insn);
+	      gcc_assert (GET_CODE (out_pat) == PARALLEL);
+
+	      for (j = 0; j < XVECLEN (out_pat, 0); j++)
+		{
+		  out_exp = XVECEXP (out_pat, 0, j);
+
+		  if (GET_CODE (out_exp) == CLOBBER)
+		    continue;
+
+		  gcc_assert (GET_CODE (out_exp) == SET);
+
+		  if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_exp)))
+		    return false;
+		}
+	    }
+	}
     }
 
   return true;
 }
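Editor's note (not part of the patch): after the rewrite, store_data_bypass_p asks one question for every store inside IN_INSN: does any destination written by OUT_INSN feed the store's address?  If so, the bypass does not apply.  A bitmask model of that decision; all names are hypothetical, and the real code additionally walks SETs inside PARALLELs and skips CLOBBERs:

#include <stdbool.h>

/* OUT_DEFS: every register OUT_INSN sets.  ADDR_USES[i]: every
   register used to compute the address of the i-th store in IN_INSN.
   The bypass holds only when the two never intersect.  */
static bool
store_data_bypass_model (unsigned out_defs,
                         const unsigned *addr_uses, int nstores)
{
  int i;

  for (i = 0; i < nstores; i++)
    if (out_defs & addr_uses[i])
      return false;              /* OUT feeds an address operand */
  return true;                   /* dependency is on the stored data */
}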
@@ -3448,17 +3470,15 @@ store_data_bypass_p (out_insn, in_insn)
    of insn categorization may be any JUMP or CALL insn.  */
 
 int
-if_test_bypass_p (out_insn, in_insn)
-     rtx out_insn, in_insn;
+if_test_bypass_p (rtx out_insn, rtx in_insn)
 {
   rtx out_set, in_set;
 
   in_set = single_set (in_insn);
   if (! in_set)
     {
-      if (GET_CODE (in_insn) == JUMP_INSN || GET_CODE (in_insn) == CALL_INSN)
-	return false;
-      abort ();
+      gcc_assert (JUMP_P (in_insn) || CALL_P (in_insn));
+      return false;
     }
 
   if (GET_CODE (SET_SRC (in_set)) != IF_THEN_ELSE)
@@ -3478,8 +3498,7 @@ if_test_bypass_p (out_insn, in_insn)
       int i;
 
       out_pat = PATTERN (out_insn);
-      if (GET_CODE (out_pat) != PARALLEL)
-	abort ();
+      gcc_assert (GET_CODE (out_pat) == PARALLEL);
 
       for (i = 0; i < XVECLEN (out_pat, 0); i++)
	 {
@@ -3488,8 +3507,7 @@ if_test_bypass_p (out_insn, in_insn)
	   if (GET_CODE (exp) == CLOBBER)
	     continue;
 
-	  if (GET_CODE (exp) != SET)
-	    abort ();
+	  gcc_assert (GET_CODE (exp) == SET);
 
	   if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1))
	       || reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2)))
@@ -3499,3 +3517,124 @@ if_test_bypass_p (out_insn, in_insn)
 
   return true;
 }
+
+static bool
+gate_handle_peephole2 (void)
+{
+  return (optimize > 0 && flag_peephole2);
+}
+
+static unsigned int
+rest_of_handle_peephole2 (void)
+{
+#ifdef HAVE_peephole2
+  peephole2_optimize ();
+#endif
+  return 0;
+}
+
+struct tree_opt_pass pass_peephole2 =
+{
+  "peephole2",                          /* name */
+  gate_handle_peephole2,                /* gate */
+  rest_of_handle_peephole2,             /* execute */
+  NULL,                                 /* sub */
+  NULL,                                 /* next */
+  0,                                    /* static_pass_number */
+  TV_PEEPHOLE2,                         /* tv_id */
+  0,                                    /* properties_required */
+  0,                                    /* properties_provided */
+  0,                                    /* properties_destroyed */
+  0,                                    /* todo_flags_start */
+  TODO_dump_func,                       /* todo_flags_finish */
+  'z'                                   /* letter */
+};
+
+static unsigned int
+rest_of_handle_split_all_insns (void)
+{
+  split_all_insns (1);
+  return 0;
+}
+
+struct tree_opt_pass pass_split_all_insns =
+{
+  "split1",                             /* name */
+  NULL,                                 /* gate */
+  rest_of_handle_split_all_insns,       /* execute */
+  NULL,                                 /* sub */
+  NULL,                                 /* next */
+  0,                                    /* static_pass_number */
+  0,                                    /* tv_id */
+  0,                                    /* properties_required */
+  0,                                    /* properties_provided */
+  0,                                    /* properties_destroyed */
+  0,                                    /* todo_flags_start */
+  TODO_dump_func,                       /* todo_flags_finish */
+  0                                     /* letter */
+};
+
+/* The placement of the splitting that we do for shorten_branches
+   depends on whether regstack is used by the target or not.  */
+static bool
+gate_do_final_split (void)
+{
+#if defined (HAVE_ATTR_length) && !defined (STACK_REGS)
+  return 1;
+#else
+  return 0;
+#endif
+}
+
+struct tree_opt_pass pass_split_for_shorten_branches =
+{
+  "split3",                             /* name */
+  gate_do_final_split,                  /* gate */
+  split_all_insns_noflow,               /* execute */
+  NULL,                                 /* sub */
+  NULL,                                 /* next */
+  0,                                    /* static_pass_number */
+  TV_SHORTEN_BRANCH,                    /* tv_id */
+  0,                                    /* properties_required */
+  0,                                    /* properties_provided */
+  0,                                    /* properties_destroyed */
+  0,                                    /* todo_flags_start */
+  TODO_dump_func,                       /* todo_flags_finish */
+  0                                     /* letter */
+};
+
+
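Editor's note (not part of the patch): every descriptor here follows the same recipe: an optional gate, an execute hook returning 0, a timevar id, and TODO_dump_func at the finish.  As a sketch, a further pass of this vintage would be declared like this; pass_example and its two hooks are hypothetical, and actually registering such a pass would also require wiring it into the pass list elsewhere:

static bool
gate_handle_example (void)
{
  return optimize > 0;                  /* run only when optimizing */
}

static unsigned int
rest_of_handle_example (void)
{
  return 0;                             /* no extra todo flags */
}

struct tree_opt_pass pass_example =
{
  "example",                            /* name */
  gate_handle_example,                  /* gate */
  rest_of_handle_example,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};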
+static bool
+gate_handle_split_before_regstack (void)
+{
+#if defined (HAVE_ATTR_length) && defined (STACK_REGS)
+  /* If flow2 creates new instructions which need splitting
+     and scheduling after reload is not done, they might not be
+     split until final, which doesn't allow splitting
+     if HAVE_ATTR_length.  */
+# ifdef INSN_SCHEDULING
+  return (optimize && !flag_schedule_insns_after_reload);
+# else
+  return (optimize);
+# endif
+#else
+  return 0;
+#endif
+}
+
+struct tree_opt_pass pass_split_before_regstack =
+{
+  "split2",                             /* name */
+  gate_handle_split_before_regstack,    /* gate */
+  rest_of_handle_split_all_insns,       /* execute */
+  NULL,                                 /* sub */
+  NULL,                                 /* next */
+  0,                                    /* static_pass_number */
+  TV_SHORTEN_BRANCH,                    /* tv_id */
+  0,                                    /* properties_required */
+  0,                                    /* properties_provided */
+  0,                                    /* properties_destroyed */
+  0,                                    /* todo_flags_start */
+  TODO_dump_func,                       /* todo_flags_finish */
+  0                                     /* letter */
+};
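Editor's note (not part of the patch): taken together, the gates split the late splitting work three ways; "split1" has a NULL gate and always runs.  A runnable restatement of the other two, assuming INSN_SCHEDULING is defined and turning the preprocessor tests into parameters; which_split_pass and its arguments are hypothetical:

#include <stdbool.h>

/* Which late splitting pass would fire for a given configuration.  */
static const char *
which_split_pass (bool have_attr_length, bool stack_regs,
                  bool optimizing, bool sched_after_reload)
{
  if (have_attr_length && !stack_regs)
    return "split3";     /* pass_split_for_shorten_branches */
  if (have_attr_length && stack_regs
      && optimizing && !sched_after_reload)
    return "split2";     /* pass_split_before_regstack */
  return "neither";      /* splitting is left to earlier passes */
}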