X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fjump.c;h=27a382a2b721dc6bc87aafa444d669666239cb27;hb=0d402b14c93c761abb0b3cca85ae40a9a5099e3d;hp=3ce95201c024e3344e2f0fc8b508e15d71d9403a;hpb=013be3e42ace1b1c511e022c6fb5db2442c1d372;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/jump.c b/gcc/jump.c index 3ce95201c02..27a382a2b72 100644 --- a/gcc/jump.c +++ b/gcc/jump.c @@ -1,5 +1,5 @@ /* Optimize jump instructions, for GNU compiler. - Copyright (C) 1987, 88, 89, 91-95, 1996 Free Software Foundation, Inc.b + Copyright (C) 1987, 88, 89, 91-95, 1996 Free Software Foundation, Inc. This file is part of GNU CC. @@ -449,28 +449,39 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan) sreg, NULL_PTR, dreg, GET_MODE (SET_SRC (body))); -#ifdef PRESERVE_DEATH_INFO_REGNO_P - /* Deleting insn could lose a death-note for SREG or DREG - so don't do it if final needs accurate death-notes. */ - if (! PRESERVE_DEATH_INFO_REGNO_P (sreg) - && ! PRESERVE_DEATH_INFO_REGNO_P (dreg)) -#endif + if (tem != 0 && + GET_MODE (tem) == GET_MODE (SET_DEST (body))) { /* DREG may have been the target of a REG_DEAD note in the insn which makes INSN redundant. If so, reorg would still think it is dead. So search for such a note and delete it if we find it. */ - for (trial = prev_nonnote_insn (insn); - trial && GET_CODE (trial) != CODE_LABEL; - trial = prev_nonnote_insn (trial)) - if (find_regno_note (trial, REG_DEAD, dreg)) - { - remove_death (dreg, trial); - break; - } - - if (tem != 0 - && GET_MODE (tem) == GET_MODE (SET_DEST (body))) + if (! find_regno_note (insn, REG_UNUSED, dreg)) + for (trial = prev_nonnote_insn (insn); + trial && GET_CODE (trial) != CODE_LABEL; + trial = prev_nonnote_insn (trial)) + if (find_regno_note (trial, REG_DEAD, dreg)) + { + remove_death (dreg, trial); + break; + } +#ifdef PRESERVE_DEATH_INFO_REGNO_P + /* Deleting insn could lose a death-note for SREG + so don't do it if final needs accurate + death-notes. */ + if (PRESERVE_DEATH_INFO_REGNO_P (sreg) + && (trial = find_regno_note (insn, REG_DEAD, sreg))) + { + /* Change this into a USE so that we won't emit + code for it, but still can keep the note. */ + PATTERN (insn) + = gen_rtx (USE, VOIDmode, XEXP (trial, 0)); + /* Remove all reg notes but the REG_DEAD one. */ + REG_NOTES (insn) = trial; + XEXP (trial, 1) = NULL_RTX; + } + else +#endif delete_insn (insn); } } @@ -852,6 +863,98 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan) } } + /* Simplify if (...) { x = a; goto l; } x = b; by converting it + to x = a; if (...) goto l; x = b; + if A is sufficiently simple, the test doesn't involve X, + and nothing in the test modifies A or X. + + If we have small register classes, we also can't do this if X + is a hard register. + + If the "x = a;" insn has any REG_NOTES, we don't do this because + of the possibility that we are running after CSE and there is a + REG_EQUAL note that is only valid if the branch has already been + taken. If we move the insn with the REG_EQUAL note, we may + fold the comparison to always be false in a later CSE pass. + (We could also delete the REG_NOTES when moving the insn, but it + seems simpler to not move it.) An exception is that we can move + the insn if the only note is a REG_EQUAL or REG_EQUIV whose + value is the same as "a". + + INSN is the goto. 
+ + We set: + + TEMP to the jump insn preceding "x = a;" + TEMP1 to X + TEMP2 to the insn that sets "x = b;" + TEMP3 to the insn that sets "x = a;" + TEMP4 to the set of "x = a"; */ + + if (this_is_simplejump + && (temp2 = next_active_insn (insn)) != 0 + && GET_CODE (temp2) == INSN + && (temp4 = single_set (temp2)) != 0 + && GET_CODE (temp1 = SET_DEST (temp4)) == REG +#ifdef SMALL_REGISTER_CLASSES + && REGNO (temp1) >= FIRST_PSEUDO_REGISTER +#endif + + && (temp3 = prev_active_insn (insn)) != 0 + && GET_CODE (temp3) == INSN + && (temp4 = single_set (temp3)) != 0 + && rtx_equal_p (SET_DEST (temp4), temp1) + && (GET_CODE (SET_SRC (temp4)) == REG + || GET_CODE (SET_SRC (temp4)) == SUBREG + || CONSTANT_P (SET_SRC (temp4))) + && (REG_NOTES (temp3) == 0 + || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL + || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV) + && XEXP (REG_NOTES (temp3), 1) == 0 + && rtx_equal_p (XEXP (REG_NOTES (temp3), 0), + SET_SRC (temp4)))) + && (temp = prev_active_insn (temp3)) != 0 + && condjump_p (temp) && ! simplejump_p (temp) + /* TEMP must skip over the "x = a;" insn */ + && prev_real_insn (JUMP_LABEL (temp)) == insn + && no_labels_between_p (temp, insn)) + { + rtx prev_label = JUMP_LABEL (temp); + rtx insert_after = prev_nonnote_insn (temp); + +#ifdef HAVE_cc0 + /* We cannot insert anything between a set of cc and its use. */ + if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i' + && sets_cc0_p (PATTERN (insert_after))) + insert_after = prev_nonnote_insn (insert_after); +#endif + ++LABEL_NUSES (prev_label); + + if (insert_after + && no_labels_between_p (insert_after, temp) + && ! reg_referenced_between_p (temp1, insert_after, temp3) + && ! reg_referenced_between_p (temp1, temp3, + NEXT_INSN (temp2)) + && ! reg_set_between_p (temp1, insert_after, temp) + && (GET_CODE (SET_SRC (temp4)) == CONST_INT + || ! reg_set_between_p (SET_SRC (temp4), + insert_after, temp)) + && invert_jump (temp, JUMP_LABEL (insn))) + { + emit_insn_after_with_line_notes (PATTERN (temp3), + insert_after, temp3); + delete_insn (temp3); + delete_insn (insn); + /* Set NEXT to an insn that we know won't go away. */ + next = temp2; + changed = 1; + } + if (prev_label && --LABEL_NUSES (prev_label) == 0) + delete_insn (prev_label); + if (changed) + continue; + } + #ifndef HAVE_cc0 /* If we have if (...) x = exp; and branches are expensive, EXP is a single insn, does not have any side effects, cannot @@ -1064,13 +1167,9 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan) We could handle BLKmode if (1) emit_store_flag could and (2) we could find the size reliably. */ && GET_MODE (XEXP (temp4, 0)) != BLKmode - /* No point in doing any of this if branches are cheap or we - don't have conditional moves. */ - && (BRANCH_COST >= 2 -#ifdef HAVE_conditional_move - || 1 -#endif - ) + /* Even if branches are cheap, the store_flag optimization + can win when the operation to be performed can be + expressed directly. */ #ifdef HAVE_cc0 /* If the previous insn sets CC0 and something else, we can't do this since we are going to delete that insn. */ @@ -1181,8 +1280,19 @@ jump_optimize (f, cross_jump, noop_moves, after_regscan) can reverse the condition. See if (3) applies possibly by reversing the condition. Prefer reversing to (4) when branches are very expensive. 
*/ - && ((reversep = 0, temp2 == const0_rtx) - || (temp3 == const0_rtx + && (((BRANCH_COST >= 2 + || STORE_FLAG_VALUE == -1 + || (STORE_FLAG_VALUE == 1 + /* Check that the mask is a power of two, + so that it can probably be generated + with a shift. */ + && exact_log2 (INTVAL (temp3)) >= 0)) + && (reversep = 0, temp2 == const0_rtx)) + || ((BRANCH_COST >= 2 + || STORE_FLAG_VALUE == -1 + || (STORE_FLAG_VALUE == 1 + && exact_log2 (INTVAL (temp2)) >= 0)) + && temp3 == const0_rtx && (reversep = can_reverse_comparison_p (temp4, insn))) || (BRANCH_COST >= 2 && GET_CODE (temp2) == CONST_INT
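
Illustrative note (not part of the patch): the new block that begins "Simplify if (...) { x = a; goto l; } x = b; ..." works on RTL insns, with TEMP the conditional jump that skips over "x = a;", TEMP3 the "x = a;" insn, INSN the unconditional goto, and TEMP2 the "x = b;" insn. At the C source level the effect is roughly the rewrite sketched below; the function and variable names are invented for the example, and the path is only taken under the conditions the code checks (the source of "x = a" is a register, subreg, or constant, X is not referenced or set by the intervening code, and the conditional jump can be inverted).

/* Shape matched before the transformation.  */
int
before_form (int cond, int a, int b)
{
  int x;

  if (cond)
    {
      x = a;		/* TEMP3: the "x = a;" insn */
      goto l;		/* INSN: the simplejump being optimized */
    }
  x = b;		/* TEMP2: next_active_insn after the goto */
 l:
  return x;
}

/* What the transformation effectively produces: "x = a;" is hoisted
   above the (inverted) test, so the branch goes straight to l.  */
int
after_form (int cond, int a, int b)
{
  int x;

  x = a;
  if (cond)
    goto l;
  x = b;
 l:
  return x;
}

Hoisting "x = a;" is safe here because evaluating A has no side effects and the test does not involve X, which is what the reg_referenced_between_p and reg_set_between_p checks in the new block enforce; the old jump target is deleted afterwards if its LABEL_NUSES count drops to zero.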
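
Also illustrative (not part of the patch): the new BRANCH_COST / STORE_FLAG_VALUE conditions in the final hunk accept the store-flag rewrite when one arm is zero and the other is a power of two, since a 0/1 comparison result can then be turned into the wanted constant with a shift instead of a branch. A rough source-level picture, assuming a hypothetical target where STORE_FLAG_VALUE is 1 and the comparison can be materialized directly:

int
with_branch (int a, int b)
{
  int x;

  if (a < b)
    x = 8;		/* power of two: exact_log2 (8) == 3 */
  else
    x = 0;
  return x;
}

int
with_store_flag (int a, int b)
{
  /* The 0/1 flag for (a < b) is shifted into place; no branch.  */
  return (a < b) << 3;
}

Whether this form is actually chosen still depends on BRANCH_COST and on the other conditions in the surrounding code.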