X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Floop.c;h=6df63026ab833a88b3542239997b512037d65c18;hb=f37ca8000c2c0db7e030a3e0e5cf49b74d30769b;hp=bc6ddec97a2bae4a8c527231d14a516439994517;hpb=ca8cce9fc2a2367248ffe52d526ce0bfbb1ed71b;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/loop.c b/gcc/loop.c
index bc6ddec97a2..6df63026ab8 100644
--- a/gcc/loop.c
+++ b/gcc/loop.c
@@ -1266,12 +1266,13 @@ scan_loop (struct loop *loop, int flags)
 	{
 	  struct movable *m;
 	  int regno = REGNO (SET_DEST (set));
+	  rtx user, user_set;
 
-	  /* A potential lossage is where we have a case where two insns
-	     can be combined as long as they are both in the loop, but
-	     we move one of them outside the loop.  For large loops,
-	     this can lose.  The most common case of this is the address
-	     of a function being called.
+	  /* A potential lossage is where we have a case where two
+	     insns can be combined as long as they are both in the
+	     loop, but we move one of them outside the loop.  For
+	     large loops, this can lose.  The most common case of
+	     this is the address of a function being called.
 
 	     Therefore, if this register is marked as being used
 	     exactly once if we are in a loop with calls
@@ -1279,41 +1280,44 @@ scan_loop (struct loop *loop, int flags)
 	     this register with the source of this SET.  If we can,
 	     delete this insn.
 
-	     Don't do this if P has a REG_RETVAL note or if we have
-	     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
+	     Don't do this if:
+	     (1) P has a REG_RETVAL note or
+	     (2) if we have SMALL_REGISTER_CLASSES and
+		 (a) SET_SRC is a hard register or
+		 (b) the destination of the user is a hard register.  */
 
 	  if (loop_info->has_call
-	      && regs->array[regno].single_usage != 0
-	      && regs->array[regno].single_usage != const0_rtx
+	      && regno >= FIRST_PSEUDO_REGISTER
+	      && (user = regs->array[regno].single_usage) != NULL
+	      && user != const0_rtx
 	      && REGNO_FIRST_UID (regno) == INSN_UID (p)
-	      && (REGNO_LAST_UID (regno)
-		  == INSN_UID (regs->array[regno].single_usage))
+	      && REGNO_LAST_UID (regno) == INSN_UID (user)
 	      && regs->array[regno].set_in_loop == 1
 	      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
 	      && ! side_effects_p (SET_SRC (set))
 	      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
-	      && (! SMALL_REGISTER_CLASSES
-		  || (! (REG_P (SET_SRC (set))
-			 && (REGNO (SET_SRC (set))
-			     < FIRST_PSEUDO_REGISTER))))
-	      && regno >= FIRST_PSEUDO_REGISTER
+	      && (!SMALL_REGISTER_CLASSES
+		  || !REG_P (SET_SRC (set))
+		  || !HARD_REGISTER_P (SET_SRC (set)))
+	      && (!SMALL_REGISTER_CLASSES
+		  || !NONJUMP_INSN_P (user)
+		  || !(user_set = single_set (user))
+		  || !REG_P (SET_DEST (user_set))
+		  || !HARD_REGISTER_P (SET_DEST (user_set)))
 	      /* This test is not redundant; SET_SRC (set) might be
 		 a call-clobbered register and the life of REGNO
 		 might span a call.  */
-	      && ! modified_between_p (SET_SRC (set), p,
-				       regs->array[regno].single_usage)
-	      && no_labels_between_p (p,
-				      regs->array[regno].single_usage)
-	      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
-				       regs->array[regno].single_usage))
+	      && ! modified_between_p (SET_SRC (set), p, user)
+	      && no_labels_between_p (p, user)
+	      && validate_replace_rtx (SET_DEST (set),
+				       SET_SRC (set), user))
 	    {
 	      /* Replace any usage in a REG_EQUAL note.  Must copy
 		 the new source, so that we don't get rtx sharing
 		 between the SET_SOURCE and REG_NOTES of insn p.  */
-	      REG_NOTES (regs->array[regno].single_usage)
-		= (replace_rtx
-		   (REG_NOTES (regs->array[regno].single_usage),
-		    SET_DEST (set), copy_rtx (SET_SRC (set))));
+	      REG_NOTES (user)
+		= replace_rtx (REG_NOTES (user), SET_DEST (set),
+			       copy_rtx (SET_SRC (set)));
 
 	      delete_insn (p);
 	      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
@@ -5493,9 +5497,16 @@ loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
 	  /* Not replaceable; emit an insn to set the original giv reg from
 	     the reduced giv.  */
 	  else if (REG_P (*v->location))
-	    loop_insn_emit_before (loop, 0, v->insn,
-				   gen_move_insn (*v->location,
-						  v->new_reg));
+	    {
+	      rtx tem;
+	      start_sequence ();
+	      tem = force_operand (v->new_reg, *v->location);
+	      if (tem != *v->location)
+		emit_move_insn (*v->location, tem);
+	      tem = get_insns ();
+	      end_sequence ();
+	      loop_insn_emit_before (loop, 0, v->insn, tem);
+	    }
 	  else if (GET_CODE (*v->location) == PLUS
 		   && REG_P (XEXP (*v->location, 0))
 		   && CONSTANT_P (XEXP (*v->location, 1)))
@@ -8800,6 +8811,63 @@ biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
 }
 
 
+/* Return false iff it is provable that biv BL plus BIAS will not wrap
+   at any point in its update sequence.  Note that at the rtl level we
+   may not have information about the signedness of BL; in that case,
+   check for both signed and unsigned overflow.  */
+
+static bool
+biased_biv_may_wrap_p (const struct loop *loop, struct iv_class *bl,
+		       unsigned HOST_WIDE_INT bias)
+{
+  HOST_WIDE_INT incr;
+  bool check_signed, check_unsigned;
+  enum machine_mode mode;
+
+  /* If the increment is not monotonic, we'd have to check separately
+     at each increment step.  Not Worth It.  */
+  incr = get_monotonic_increment (bl);
+  if (incr == 0)
+    return true;
+
+  /* If this biv is the loop iteration variable, then we may be able to
+     deduce a sign based on the loop condition.  */
+  /* ??? This is not 100% reliable; consider an unsigned biv that is cast
+     to signed for the comparison.  However, this same bug appears all
+     through loop.c.  */
+  check_signed = check_unsigned = true;
+  if (bl->biv->src_reg == LOOP_INFO (loop)->iteration_var)
+    {
+      switch (LOOP_INFO (loop)->comparison_code)
+	{
+	case GTU: case GEU: case LTU: case LEU:
+	  check_signed = false;
+	  break;
+	case GT: case GE: case LT: case LE:
+	  check_unsigned = false;
+	  break;
+	default:
+	  break;
+	}
+    }
+
+  mode = GET_MODE (bl->biv->src_reg);
+
+  if (check_unsigned
+      && !biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
+    return true;
+
+  if (check_signed)
+    {
+      bias += (GET_MODE_MASK (mode) >> 1) + 1;
+      if (!biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
+	return true;
+    }
+
+  return false;
+}
+
+
 /* Given that X is an extension or truncation of BL, return true if it
    is unaffected by overflow.  LOOP is the loop to which BL belongs
    and INCR is its per-iteration increment.  */
@@ -10210,195 +10278,56 @@ maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
 	  else
 	    break;
 
-	  if (CONSTANT_P (arg))
-	    {
-	      /* First try to replace with any giv that has constant positive
-		 mult_val and constant add_val.  We might be able to support
-		 negative mult_val, but it seems complex to do it in general.  */
-
-	      for (v = bl->giv; v; v = v->next_iv)
-		if (GET_CODE (v->mult_val) == CONST_INT
-		    && INTVAL (v->mult_val) > 0
-		    && (GET_CODE (v->add_val) == SYMBOL_REF
-			|| GET_CODE (v->add_val) == LABEL_REF
-			|| GET_CODE (v->add_val) == CONST
-			|| (REG_P (v->add_val)
-			    && REG_POINTER (v->add_val)))
-		    && ! v->ignore && ! v->maybe_dead && v->always_computable
-		    && v->mode == mode)
-		  {
-		    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
-		      continue;
-
-		    /* Don't eliminate if the linear combination that makes up
-		       the giv overflows when it is applied to ARG.  */
-		    if (GET_CODE (arg) == CONST_INT)
-		      {
-			rtx add_val;
-
-			if (GET_CODE (v->add_val) == CONST_INT)
-			  add_val = v->add_val;
-			else
-			  add_val = const0_rtx;
-
-			if (const_mult_add_overflow_p (arg, v->mult_val,
-						       add_val, mode, 1))
-			  continue;
-		      }
-
-		    if (! eliminate_p)
-		      return 1;
-
-		    /* Replace biv with the giv's reduced reg.  */
-		    validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
-
-		    /* If all constants are actually constant integers and
-		       the derived constant can be directly placed in the COMPARE,
-		       do so.  */
-		    if (GET_CODE (arg) == CONST_INT
-			&& GET_CODE (v->add_val) == CONST_INT)
-		      {
-			tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
-					       v->add_val, mode, 1);
-		      }
-		    else
-		      {
-			/* Otherwise, load it into a register.  */
-			tem = gen_reg_rtx (mode);
-			loop_iv_add_mult_emit_before (loop, arg,
-						      v->mult_val, v->add_val,
-						      tem, where_bb, where_insn);
-		      }
-
-		    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
-
-		    if (apply_change_group ())
-		      return 1;
-		  }
-
-	      /* Look for giv with positive constant mult_val and nonconst add_val.
-		 Insert insns to calculate new compare value.
-		 ??? Turn this off due to possible overflow.  */
-
-	      for (v = bl->giv; v; v = v->next_iv)
-		if (GET_CODE (v->mult_val) == CONST_INT
-		    && INTVAL (v->mult_val) > 0
-		    && ! v->ignore && ! v->maybe_dead && v->always_computable
-		    && v->mode == mode
-		    && 0)
-		  {
-		    rtx tem;
-
-		    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
-		      continue;
-
-		    if (! eliminate_p)
-		      return 1;
-
-		    tem = gen_reg_rtx (mode);
-
-		    /* Replace biv with giv's reduced register.  */
-		    validate_change (insn, &XEXP (x, 1 - arg_operand),
-				     v->new_reg, 1);
-
-		    /* Compute value to compare against.  */
-		    loop_iv_add_mult_emit_before (loop, arg,
-						  v->mult_val, v->add_val,
-						  tem, where_bb, where_insn);
-		    /* Use it in this insn.  */
-		    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
-		    if (apply_change_group ())
-		      return 1;
-		  }
-	    }
-	  else if (REG_P (arg) || MEM_P (arg))
-	    {
-	      if (loop_invariant_p (loop, arg) == 1)
-		{
-		  /* Look for giv with constant positive mult_val and nonconst
-		     add_val. Insert insns to compute new compare value.
-		     ??? Turn this off due to possible overflow.  */
-
-		  for (v = bl->giv; v; v = v->next_iv)
-		    if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
-			&& ! v->ignore && ! v->maybe_dead && v->always_computable
-			&& v->mode == mode
-			&& 0)
-		      {
-			rtx tem;
-
-			if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
-			  continue;
-
-			if (! eliminate_p)
-			  return 1;
-
-			tem = gen_reg_rtx (mode);
-
-			/* Replace biv with giv's reduced register.  */
-			validate_change (insn, &XEXP (x, 1 - arg_operand),
-					 v->new_reg, 1);
-
-			/* Compute value to compare against.  */
-			loop_iv_add_mult_emit_before (loop, arg,
						      v->mult_val, v->add_val,
-						      tem, where_bb, where_insn);
-			validate_change (insn, &XEXP (x, arg_operand), tem, 1);
-			if (apply_change_group ())
-			  return 1;
-		      }
-		}
-
-	      /* This code has problems.  Basically, you can't know when
-		 seeing if we will eliminate BL, whether a particular giv
-		 of ARG will be reduced.  If it isn't going to be reduced,
-		 we can't eliminate BL.  We can try forcing it to be reduced,
-		 but that can generate poor code.
+	  if (GET_CODE (arg) != CONST_INT)
+	    return 0;
 
-		 The problem is that the benefit of reducing TV, below should
-		 be increased if BL can actually be eliminated, but this means
-		 we might have to do a topological sort of the order in which
-		 we try to process biv.  It doesn't seem worthwhile to do
-		 this sort of thing now.  */
+	  /* Unless we're dealing with an equality comparison, if we can't
+	     determine that the original biv doesn't wrap, then we must not
+	     apply the transformation.  */
+	  /* ??? Actually, what we must do is verify that the transformed
+	     giv doesn't wrap.  But the general case of this transformation
+	     was disabled long ago due to wrapping problems, and there's no
+	     point reviving it this close to end-of-life for loop.c.  The
+	     only case still enabled is known (via the check on add_val) to
+	     be pointer arithmetic, which in theory never overflows for
+	     valid programs.  */
+	  /* Without lifetime analysis, we don't know how COMPARE will be
+	     used, so we must assume the worst.  */
+	  if (code != EQ && code != NE
+	      && biased_biv_may_wrap_p (loop, bl, INTVAL (arg)))
+	    return 0;
 
-#if 0
-	  /* Otherwise the reg compared with had better be a biv.  */
-	  if (!REG_P (arg)
-	      || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
-	    return 0;
+	  /* Try to replace with any giv that has constant positive
+	     mult_val and a pointer add_val.  */
+	  for (v = bl->giv; v; v = v->next_iv)
+	    if (GET_CODE (v->mult_val) == CONST_INT
+		&& INTVAL (v->mult_val) > 0
+		&& (GET_CODE (v->add_val) == SYMBOL_REF
+		    || GET_CODE (v->add_val) == LABEL_REF
+		    || GET_CODE (v->add_val) == CONST
+		    || (REG_P (v->add_val) && REG_POINTER (v->add_val)))
+		&& ! v->ignore && ! v->maybe_dead && v->always_computable
+		&& v->mode == mode)
+	      {
+		if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
+		  continue;
 
-	  /* Look for a pair of givs, one for each biv,
-	     with identical coefficients.  */
-	  for (v = bl->giv; v; v = v->next_iv)
-	    {
-	      struct induction *tv;
+		if (! eliminate_p)
+		  return 1;
 
-	      if (v->ignore || v->maybe_dead || v->mode != mode)
-		continue;
+		/* Replace biv with the giv's reduced reg.  */
+		validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
 
-	      for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
-		   tv = tv->next_iv)
-		if (! tv->ignore && ! tv->maybe_dead
-		    && rtx_equal_p (tv->mult_val, v->mult_val)
-		    && rtx_equal_p (tv->add_val, v->add_val)
-		    && tv->mode == mode)
-		  {
-		    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
-		      continue;
+		/* Load the value into a register.  */
+		tem = gen_reg_rtx (mode);
+		loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val,
					      tem, where_bb, where_insn);
 
-		    if (! eliminate_p)
-		      return 1;
+		validate_change (insn, &XEXP (x, arg_operand), tem, 1);
 
-		    /* Replace biv with its giv's reduced reg.  */
-		    XEXP (x, 1 - arg_operand) = v->new_reg;
-		    /* Replace other operand with the other giv's
-		       reduced reg.  */
-		    XEXP (x, arg_operand) = tv->new_reg;
-		    return 1;
-		  }
-	    }
-#endif
-	    }
+		if (apply_change_group ())
+		  return 1;
+	      }
 
 	  /* If we get here, the biv can't be eliminated.  */
 	  return 0;
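
The guard at the center of this patch, biased_biv_may_wrap_p, folds the
signed overflow check into the unsigned one by bumping BIAS by
(GET_MODE_MASK (mode) >> 1) + 1 and reusing the same range test.  Below is
a minimal standalone C sketch of that rebasing trick, assuming a fixed
32-bit mode; the helper names fits_unsigned_p/fits_signed_p and the driver
are illustrative only and are not loop.c interfaces (the real
biased_biv_fits_mode_p checks the whole biv update sequence, not a single
value).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Unsigned mask of a hypothetical 32-bit mode, playing the role of
   GET_MODE_MASK (mode) in the patch above.  */
static const uint64_t mode_mask = 0xffffffffULL;

/* Does VALUE lie in the unsigned range of the mode, [0, 2^32)?  This is
   the one primitive range test everything else is expressed with.  */
static bool
fits_unsigned_p (uint64_t value)
{
  return value <= mode_mask;
}

/* The rebasing trick: a signed VALUE lies in [-2^31, 2^31) exactly when
   VALUE + 2^31 lies in [0, 2^32), so the unsigned primitive answers the
   signed question once the bias grows by (mode_mask >> 1) + 1.  */
static bool
fits_signed_p (int64_t value)
{
  int64_t half = (int64_t) ((mode_mask >> 1) + 1);	/* 2^31 */
  return fits_unsigned_p ((uint64_t) (value + half));
}

int
main (void)
{
  /* A biv walking -4, -2, 0, 2: every value fits when interpreted as
     signed, but the negative ones wrap as unsigned.  When the loop
     condition reveals no signedness, both interpretations must be
     checked and the worst assumed, as the patch does.  */
  int64_t v;
  for (v = -4; v <= 2; v += 2)
    printf ("%4lld  signed:%d  unsigned:%d\n", (long long) v,
	    fits_signed_p (v), fits_unsigned_p ((uint64_t) v));
  return 0;
}

Running the sketch prints signed:1 for all four values but unsigned:0 for
-4 and -2, which is precisely why biased_biv_may_wrap_p can only drop one
of the two checks when the loop's comparison code (GTU/GEU/LTU/LEU versus
GT/GE/LT/LE) pins down a signedness.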