X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Freorg.c;h=da4a1a03e83691d474f43a9561a69a6d557e4847;hb=ed8aa6928fbae14e64748e577a240e0c83d2a7d8;hp=cab9d46ab75029465645c0d7a5be83f79d62b486;hpb=ea1d1c38832bfcde3644908d3dbd8f94f578669e;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/reorg.c b/gcc/reorg.c
index cab9d46ab75..da4a1a03e83 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -317,6 +317,20 @@ insn_sets_resource_p (rtx insn, struct resources *res,
   mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
   return resource_conflicts_p (&insn_sets, res);
 }
+
+/* Return TRUE if INSN is a return, possibly with a filled delay slot.  */
+
+static bool
+return_insn_p (rtx insn)
+{
+  if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN)
+    return true;
+
+  if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+    return return_insn_p (XVECEXP (PATTERN (insn), 0, 0));
+
+  return false;
+}
 
 /* Find a label at the end of the function or before a RETURN.  If there
    is none, make one.  */
@@ -344,15 +358,13 @@ find_end_label (void)
   /* When a target threads its epilogue we might already have a
      suitable return insn.  If so put a label before it for the
      end_of_function_label.  */
-  if (GET_CODE (insn) == BARRIER
-      && GET_CODE (PREV_INSN (insn)) == JUMP_INSN
-      && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
+  if (GET_CODE (insn) == BARRIER && return_insn_p (PREV_INSN (insn)))
     {
       rtx temp = PREV_INSN (PREV_INSN (insn));
       end_of_function_label = gen_label_rtx ();
       LABEL_NUSES (end_of_function_label) = 0;
 
-      /* Put the label before an USE insns that may proceed the RETURN insn.  */
+      /* Put the label before an USE insn that may precede the RETURN insn.  */
       while (GET_CODE (temp) == USE)
        temp = PREV_INSN (temp);
 
@@ -368,8 +380,7 @@ find_end_label (void)
       /* If the basic block reorder pass moves the return insn to
          some other place try to locate it again and put our
          end_of_function_label there.  */
-      while (insn && ! (GET_CODE (insn) == JUMP_INSN
-                        && (GET_CODE (PATTERN (insn)) == RETURN)))
+      while (insn && ! return_insn_p (insn))
        insn = PREV_INSN (insn);
       if (insn)
        {
@@ -570,6 +581,7 @@ delete_from_delay_slot (rtx insn)
   rtx trial, seq_insn, seq, prev;
   rtx delay_list = 0;
   int i;
+  int had_barrier = 0;
 
   /* We first must find the insn containing the SEQUENCE with INSN in its
      delay slot.  Do this by finding an insn, TRIAL, where
@@ -583,6 +595,9 @@ delete_from_delay_slot (rtx insn)
   seq_insn = PREV_INSN (NEXT_INSN (trial));
   seq = PATTERN (seq_insn);
 
+  if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == BARRIER)
+    had_barrier = 1;
+
   /* Create a delay list consisting of all the insns other than the one
      we are deleting (unless we were the only one).  */
   if (XVECLEN (seq, 0) > 2)
@@ -597,8 +612,8 @@ delete_from_delay_slot (rtx insn)
   delete_related_insns (seq_insn);
   add_insn_after (trial, prev);
 
-  if (GET_CODE (trial) == JUMP_INSN
-      && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN))
+  /* If there was a barrier after the old SEQUENCE, remit it.  */
+  if (had_barrier)
     emit_barrier_after (trial);
 
   /* If there are any delay insns, remit them.  Otherwise clear the
@@ -1920,7 +1935,7 @@ update_reg_dead_notes (rtx insn, rtx delayed_insn)
       next = XEXP (link, 1);
 
       if (REG_NOTE_KIND (link) != REG_DEAD
-         || GET_CODE (XEXP (link, 0)) != REG)
+         || !REG_P (XEXP (link, 0)))
        continue;
 
       if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
@@ -1953,7 +1968,7 @@ fix_reg_dead_note (rtx start_insn, rtx stop_insn)
       next = XEXP (link, 1);
 
       if (REG_NOTE_KIND (link) != REG_DEAD
-         || GET_CODE (XEXP (link, 0)) != REG)
+         || !REG_P (XEXP (link, 0)))
        continue;
 
       if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
@@ -1981,7 +1996,7 @@ update_reg_unused_notes (rtx insn, rtx redundant_insn)
       next = XEXP (link, 1);
 
       if (REG_NOTE_KIND (link) != REG_UNUSED
-         || GET_CODE (XEXP (link, 0)) != REG)
+         || !REG_P (XEXP (link, 0)))
        continue;
 
       if (! find_regno_note (redundant_insn, REG_UNUSED,
@@ -2349,7 +2364,9 @@ fill_simple_delay_slots (int non_jumps_p)
              && eligible_for_delay (insn, slots_filled, next_trial, flags)
              && ! can_throw_internal (trial))
            {
-             rtx new_label = next_active_insn (next_trial);
+             /* See comment in relax_delay_slots about necessity of using
+                next_real_insn here.  */
+             rtx new_label = next_real_insn (next_trial);
 
              if (new_label != 0)
                new_label = get_label_before (new_label);
@@ -2758,8 +2775,8 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
         destination.  Overlap may happen for larger-than-register-size modes.  */
 
       if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET
-         && GET_CODE (SET_SRC (pat)) == REG
-         && GET_CODE (SET_DEST (pat)) == REG
+         && REG_P (SET_SRC (pat))
+         && REG_P (SET_DEST (pat))
          && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat)))
        {
          rtx next = next_nonnote_insn (trial);
@@ -3073,9 +3090,7 @@ relax_delay_slots (rtx first)
          && (condjump_p (insn) || condjump_in_parallel_p (insn))
          && (target_label = JUMP_LABEL (insn)) != 0)
        {
-         target_label = follow_jumps (target_label);
-         target_label = prev_label (next_active_insn (target_label));
-
+         target_label = skip_consecutive_labels (follow_jumps (target_label));
          if (target_label == 0)
            target_label = find_end_label ();
 
@@ -3170,7 +3185,7 @@
 
       /* See if we have a RETURN insn with a filled delay slot followed
         by a RETURN insn with an unfilled a delay slot.  If so, we can delete
-        the first RETURN (but not it's delay insn).  This gives the same
+        the first RETURN (but not its delay insn).  This gives the same
        effect in fewer instructions.
 
        Only do so if optimizing for size since this results in slower, but
@@ -3223,14 +3238,8 @@
        {
          /* If this jump goes to another unconditional jump, thread it, but
             don't convert a jump into a RETURN here.  */
-         trial = follow_jumps (target_label);
-         /* We use next_real_insn instead of next_active_insn, so that
-            the special USE insns emitted by reorg won't be ignored.
-            If they are ignored, then they will get deleted if target_label
-            is now unreachable, and that would cause mark_target_live_regs
-            to fail.  */
-         trial = prev_label (next_real_insn (trial));
-         if (trial == 0 && target_label != 0)
+         trial = skip_consecutive_labels (follow_jumps (target_label));
+         if (trial == 0)
            trial = find_end_label ();
 
          if (trial != target_label
@@ -3277,10 +3286,18 @@
            {
              target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
              if (target_label == 0)
-               target_label = find_end_label ();
+               {
+                 target_label = find_end_label ();
+                 /* The following condition may be true if TRIAL contains
+                    the unique RETURN.  In this case, threading would be
+                    a nop and we would enter an infinite loop if we did it.  */
+                 if (next_active_insn (target_label) == trial)
+                   target_label = 0;
+               }
 
-             if (redirect_with_delay_slots_safe_p (delay_insn, target_label,
-                                                   insn))
+             if (target_label
+                 && redirect_with_delay_slots_safe_p (delay_insn, target_label,
+                                                      insn))
                {
                  reorg_redirect_jump (delay_insn, target_label);
                  next = insn;
@@ -3613,7 +3630,7 @@ dbr_schedule (rtx first, FILE *file)
       if (GET_CODE (insn) == JUMP_INSN
          && (condjump_p (insn) || condjump_in_parallel_p (insn))
          && JUMP_LABEL (insn) != 0
-         && ((target = prev_label (next_active_insn (JUMP_LABEL (insn))))
+         && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
              != JUMP_LABEL (insn)))
        redirect_jump (insn, target, 1);
     }
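
Note (not part of the patch above): the sketch below is a minimal, self-contained illustration of the check performed by the new return_insn_p helper, which treats an insn as a return either when it is a bare (jump_insn (return)) or when a return sits at the head of a filled delay-slot SEQUENCE.  The toy_insn type, the TOY_* codes and toy_return_insn_p are invented stand-ins chosen only so the example compiles outside of GCC; they are not GCC APIs, and only the shape of the test mirrors the patch.

/* Standalone illustration of the recursion used by return_insn_p above.
   The types and codes are simplified stand-ins for GCC's rtx machinery.  */
#include <stdbool.h>
#include <stdio.h>

enum toy_code { TOY_INSN, TOY_JUMP_INSN, TOY_RETURN, TOY_SEQUENCE, TOY_SET };

struct toy_insn
{
  enum toy_code code;          /* analogous to GET_CODE (insn) */
  enum toy_code pattern_code;  /* analogous to GET_CODE (PATTERN (insn)) */
  struct toy_insn *seq_first;  /* first element of a SEQUENCE, if any;
                                  analogous to XVECEXP (PATTERN (insn), 0, 0) */
};

/* Return true if INSN is a return, possibly sitting at the head of a
   filled delay-slot SEQUENCE -- the two cases the patch handles.  */
static bool
toy_return_insn_p (const struct toy_insn *insn)
{
  if (insn->code == TOY_JUMP_INSN && insn->pattern_code == TOY_RETURN)
    return true;

  if (insn->code == TOY_INSN && insn->pattern_code == TOY_SEQUENCE
      && insn->seq_first != NULL)
    return toy_return_insn_p (insn->seq_first);

  return false;
}

int
main (void)
{
  struct toy_insn bare_return = { TOY_JUMP_INSN, TOY_RETURN, NULL };
  struct toy_insn filled_slot = { TOY_INSN, TOY_SEQUENCE, &bare_return };
  struct toy_insn ordinary    = { TOY_INSN, TOY_SET, NULL };

  printf ("%d %d %d\n",
          toy_return_insn_p (&bare_return),   /* 1: plain RETURN jump */
          toy_return_insn_p (&filled_slot),   /* 1: RETURN with filled slot */
          toy_return_insn_p (&ordinary));     /* 0: ordinary insn */
  return 0;
}

Built with any C99 compiler, this prints "1 1 0", mirroring the three cases the real predicate distinguishes.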