X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Freorg.c;h=43ba32194221d412518f5cb2c4e3b19f4701c856;hb=576aff9d272cd8ebec2b93b10dcf5eae125e9667;hp=2ae0a8f2259f57a77c6be28daf82f21b0494f86f;hpb=f17979aa28f0882c2083d518948e151dfdb2c81e;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/reorg.c b/gcc/reorg.c
index 2ae0a8f2259..43ba3219422 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -1,6 +1,6 @@
 /* Perform instruction reorganizations for delay slot filling.
    Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
    Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
    Hacked by Michael Tiemann (tiemann@cygnus.com).
 
@@ -253,7 +253,7 @@ stop_search_p (rtx insn, int labels_p)
 	    || asm_noperands (PATTERN (insn)) >= 0);
 
     default:
-      abort ();
+      gcc_unreachable ();
     }
 }
 
@@ -318,8 +318,24 @@ insn_sets_resource_p (rtx insn, struct resources *res,
   return resource_conflicts_p (&insn_sets, res);
 }
 
-/* Find a label at the end of the function or before a RETURN.  If there is
-   none, make one.  */
+/* Find a label at the end of the function or before a RETURN.  If there
+   is none, try to make one.  If that fails, returns 0.
+
+   The property of such a label is that it is placed just before the
+   epilogue or a bare RETURN insn, so that another bare RETURN can be
+   turned into a jump to the label unconditionally.  In particular, the
+   label cannot be placed before a RETURN insn with a filled delay slot.
+
+   ??? There may be a problem with the current implementation.  Suppose
+   we start with a bare RETURN insn and call find_end_label.  It may set
+   end_of_function_label just before the RETURN.  Suppose the machinery
+   is able to fill the delay slot of the RETURN insn afterwards.  Then
+   end_of_function_label is no longer valid according to the property
+   described above and find_end_label will still return it unmodified.
+   Note that this is probably mitigated by the following observation:
+   once end_of_function_label is made, it is very likely the target of
+   a jump, so filling the delay slot of the RETURN will be much more
+   difficult.  */
 
 static rtx
 find_end_label (void)
@@ -335,8 +351,8 @@ find_end_label (void)
      label and we don't have to do anything else.  */
 
   insn = get_last_insn ();
-  while (GET_CODE (insn) == NOTE
-	 || (GET_CODE (insn) == INSN
+  while (NOTE_P (insn)
+	 || (NONJUMP_INSN_P (insn)
 	     && (GET_CODE (PATTERN (insn)) == USE
 		 || GET_CODE (PATTERN (insn)) == CLOBBER)))
     insn = PREV_INSN (insn);
 
@@ -344,22 +360,22 @@ find_end_label (void)
   /* When a target threads its epilogue we might already have a
      suitable return insn.  If so put a label before it for the
      end_of_function_label.  */
-  if (GET_CODE (insn) == BARRIER
-      && GET_CODE (PREV_INSN (insn)) == JUMP_INSN
+  if (BARRIER_P (insn)
+      && JUMP_P (PREV_INSN (insn))
      && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
    {
      rtx temp = PREV_INSN (PREV_INSN (insn));
      end_of_function_label = gen_label_rtx ();
      LABEL_NUSES (end_of_function_label) = 0;
 
-      /* Put the label before an USE insns that may proceed the RETURN insn. */
+      /* Put the label before an USE insns that may precede the RETURN insn.
*/ while (GET_CODE (temp) == USE) temp = PREV_INSN (temp); emit_label_after (end_of_function_label, temp); } - else if (GET_CODE (insn) == CODE_LABEL) + else if (LABEL_P (insn)) end_of_function_label = insn; else { @@ -368,7 +384,7 @@ find_end_label (void) /* If the basic block reorder pass moves the return insn to some other place try to locate it again and put our end_of_function_label there. */ - while (insn && ! (GET_CODE (insn) == JUMP_INSN + while (insn && ! (JUMP_P (insn) && (GET_CODE (PATTERN (insn)) == RETURN))) insn = PREV_INSN (insn); if (insn) @@ -384,11 +400,32 @@ find_end_label (void) } else { +#ifdef HAVE_epilogue + if (HAVE_epilogue +#ifdef HAVE_return + && ! HAVE_return +#endif + ) + { + /* The RETURN insn has its delay slot filled so we cannot + emit the label just before it. Since we already have + an epilogue and cannot emit a new RETURN, we cannot + emit the label at all. */ + end_of_function_label = NULL_RTX; + return end_of_function_label; + } +#endif /* HAVE_epilogue */ + /* Otherwise, make a new label and emit a RETURN and BARRIER, if needed. */ emit_label (end_of_function_label); #ifdef HAVE_return - if (HAVE_return) + /* We don't bother trying to create a return insn if the + epilogue has filled delay-slots; we would have to try and + move the delay-slot fillers to the delay-slots for the new + return insn or in front of the new return insn. */ + if (current_function_epilogue_delay_list == NULL + && HAVE_return) { /* The return we make may have delay slots too. */ rtx insn = gen_return (); @@ -438,7 +475,7 @@ emit_delay_sequence (rtx insn, rtx list, int length) /* If INSN is followed by a BARRIER, delete the BARRIER since it will only confuse further processing. Update LAST in case it was the last insn. We will put the BARRIER back in later. */ - if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER) + if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn))) { delete_related_insns (NEXT_INSN (insn)); last = get_last_insn (); @@ -502,7 +539,7 @@ emit_delay_sequence (rtx insn, rtx list, int length) case REG_LABEL: /* Keep the label reference count up to date. */ - if (GET_CODE (XEXP (note, 0)) == CODE_LABEL) + if (LABEL_P (XEXP (note, 0))) LABEL_NUSES (XEXP (note, 0)) ++; break; @@ -518,13 +555,13 @@ emit_delay_sequence (rtx insn, rtx list, int length) last insn in that SEQUENCE to point to us. Similarly for the first insn in the following insn if it is a SEQUENCE. */ - if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN + if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn)) && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE) NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0, XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1)) = seq_insn; - if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN + if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn)) && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE) PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn; @@ -532,8 +569,7 @@ emit_delay_sequence (rtx insn, rtx list, int length) if (had_barrier) emit_barrier_after (seq_insn); - if (i != length + 1) - abort (); + gcc_assert (i == length + 1); return seq_insn; } @@ -570,6 +606,7 @@ delete_from_delay_slot (rtx insn) rtx trial, seq_insn, seq, prev; rtx delay_list = 0; int i; + int had_barrier = 0; /* We first must find the insn containing the SEQUENCE with INSN in its delay slot. 
Do this by finding an insn, TRIAL, where @@ -583,6 +620,9 @@ delete_from_delay_slot (rtx insn) seq_insn = PREV_INSN (NEXT_INSN (trial)); seq = PATTERN (seq_insn); + if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn))) + had_barrier = 1; + /* Create a delay list consisting of all the insns other than the one we are deleting (unless we were the only one). */ if (XVECLEN (seq, 0) > 2) @@ -597,17 +637,15 @@ delete_from_delay_slot (rtx insn) delete_related_insns (seq_insn); add_insn_after (trial, prev); - if (GET_CODE (trial) == JUMP_INSN - && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN)) + /* If there was a barrier after the old SEQUENCE, remit it. */ + if (had_barrier) emit_barrier_after (trial); /* If there are any delay insns, remit them. Otherwise clear the annul flag. */ if (delay_list) trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2); - else if (GET_CODE (trial) == JUMP_INSN - || GET_CODE (trial) == CALL_INSN - || GET_CODE (trial) == INSN) + else if (INSN_P (trial)) INSN_ANNULLED_BRANCH_P (trial) = 0; INSN_FROM_TARGET_P (insn) = 0; @@ -650,7 +688,7 @@ delete_scheduled_jump (rtx insn) a delay slot. It will be the last insn in the delay slot, if it is. */ rtx trial = previous_insn (insn); - if (GET_CODE (trial) == NOTE) + if (NOTE_P (trial)) trial = prev_nonnote_insn (trial); if (sets_cc0_p (PATTERN (trial)) != 1 || FIND_REG_INC_NOTE (trial, NULL_RTX)) @@ -727,13 +765,12 @@ optimize_skip (rtx insn) rtx trial = next_nonnote_insn (insn); rtx next_trial = next_active_insn (trial); rtx delay_list = 0; - rtx target_label; int flags; flags = get_jump_flags (insn, JUMP_LABEL (insn)); if (trial == 0 - || GET_CODE (trial) != INSN + || !NONJUMP_INSN_P (trial) || GET_CODE (PATTERN (trial)) == SEQUENCE || recog_memoized (trial) < 0 || (! eligible_for_annul_false (insn, 0, trial, flags) @@ -750,7 +787,7 @@ optimize_skip (rtx insn) if ((next_trial == next_active_insn (JUMP_LABEL (insn)) && ! (next_trial == 0 && current_function_epilogue_delay_list != 0)) || (next_trial != 0 - && GET_CODE (next_trial) == JUMP_INSN + && JUMP_P (next_trial) && JUMP_LABEL (insn) == JUMP_LABEL (next_trial) && (simplejump_p (next_trial) || GET_CODE (PATTERN (next_trial)) == RETURN))) @@ -772,21 +809,24 @@ optimize_skip (rtx insn) branch, thread our jump to the target of that branch. Don't change this into a RETURN here, because it may not accept what we have in the delay slot. We'll fix this up later. */ - if (next_trial && GET_CODE (next_trial) == JUMP_INSN + if (next_trial && JUMP_P (next_trial) && (simplejump_p (next_trial) || GET_CODE (PATTERN (next_trial)) == RETURN)) { - target_label = JUMP_LABEL (next_trial); + rtx target_label = JUMP_LABEL (next_trial); if (target_label == 0) target_label = find_end_label (); - /* Recompute the flags based on TARGET_LABEL since threading - the jump to TARGET_LABEL may change the direction of the - jump (which may change the circumstances in which the - delay slot is nullified). */ - flags = get_jump_flags (insn, target_label); - if (eligible_for_annul_true (insn, 0, trial, flags)) - reorg_redirect_jump (insn, target_label); + if (target_label) + { + /* Recompute the flags based on TARGET_LABEL since threading + the jump to TARGET_LABEL may change the direction of the + jump (which may change the circumstances in which the + delay slot is nullified). 
*/ + flags = get_jump_flags (insn, target_label); + if (eligible_for_annul_true (insn, 0, trial, flags)) + reorg_redirect_jump (insn, target_label); + } } INSN_ANNULLED_BRANCH_P (insn) = 1; @@ -813,7 +853,7 @@ get_jump_flags (rtx insn, rtx label) If LABEL is zero, then there is no way to determine the branch direction. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (condjump_p (insn) || condjump_in_parallel_p (insn)) && INSN_UID (insn) <= max_uid && label != 0 @@ -829,7 +869,7 @@ get_jump_flags (rtx insn, rtx label) determine the branch prediction. Non conditional branches are predicted as very likely taken. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (condjump_p (insn) || condjump_in_parallel_p (insn))) { int prediction; @@ -851,7 +891,7 @@ get_jump_flags (rtx insn, rtx label) break; default: - abort (); + gcc_unreachable (); } } else @@ -873,7 +913,7 @@ rare_destination (rtx insn) for (; insn; insn = next) { - if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) + if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); next = NEXT_INSN (insn); @@ -959,20 +999,10 @@ mostly_true_jump (rtx jump_insn, rtx condition) before the next real insn, we assume the branch is to the top of the loop. */ for (insn = PREV_INSN (target_label); - insn && GET_CODE (insn) == NOTE; + insn && NOTE_P (insn); insn = PREV_INSN (insn)) if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) return 2; - - /* If this is a jump to the test of a loop, it is likely true. We scan - forwards from the target label. If we find a NOTE_INSN_LOOP_VTOP - before the next real insn, we assume the branch is to the loop branch - test. */ - for (insn = NEXT_INSN (target_label); - insn && GET_CODE (insn) == NOTE; - insn = PREV_INSN (insn)) - if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP) - return 1; } /* Look at the relative rarities of the fallthrough and destination. If @@ -1468,7 +1498,7 @@ try_merge_delay_insns (rtx insn, rtx thread) next_trial = next_nonnote_insn (trial); /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */ - if (GET_CODE (trial) == INSN + if (NONJUMP_INSN_P (trial) && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)) continue; @@ -1516,7 +1546,7 @@ try_merge_delay_insns (rtx insn, rtx thread) /* See if we stopped on a filled insn. If we did, try to see if its delay slots match. */ if (slot_number != num_slots - && trial && GET_CODE (trial) == INSN + && trial && NONJUMP_INSN_P (trial) && GET_CODE (PATTERN (trial)) == SEQUENCE && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))) { @@ -1641,7 +1671,7 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) trial && insns_to_search > 0; trial = PREV_INSN (trial), --insns_to_search) { - if (GET_CODE (trial) == CODE_LABEL) + if (LABEL_P (trial)) return 0; if (! INSN_P (trial)) @@ -1655,7 +1685,7 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) { /* Stop for a CALL and its delay slots because it is difficult to track its resource needs correctly. */ - if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) + if (CALL_P (XVECEXP (pat, 0, 0))) return 0; /* Stop for an INSN or JUMP_INSN with delayed effects and its delay @@ -1703,7 +1733,7 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) mark_referenced_resources (insn, &needed, 1); /* If TARGET is a SEQUENCE, get the main insn. 
*/ - if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) + if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE) target_main = XVECEXP (PATTERN (target), 0, 0); if (resource_conflicts_p (&needed, &set) @@ -1732,7 +1762,7 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) delay_list = XEXP (delay_list, 1); } - if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) + if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE) for (i = 1; i < XVECLEN (PATTERN (target), 0); i++) if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1)) return 0; @@ -1742,11 +1772,10 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) for (trial = PREV_INSN (target), insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH; - trial && GET_CODE (trial) != CODE_LABEL && insns_to_search > 0; + trial && !LABEL_P (trial) && insns_to_search > 0; trial = PREV_INSN (trial), --insns_to_search) { - if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN - && GET_CODE (trial) != JUMP_INSN) + if (!INSN_P (trial)) continue; pat = PATTERN (trial); @@ -1757,7 +1786,7 @@ redundant_insn (rtx insn, rtx target, rtx delay_list) { /* If this is a CALL_INSN and its delay slots, it is hard to track the resource needs properly, so give up. */ - if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) + if (CALL_P (XVECEXP (pat, 0, 0))) return 0; /* If this is an INSN or JUMP_INSN with delayed effects, it @@ -1841,7 +1870,7 @@ own_thread_p (rtx thread, rtx label, int allow_fallthrough) active_insn = next_active_insn (PREV_INSN (thread)); for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == CODE_LABEL + if (LABEL_P (insn) && (insn != label || LABEL_NUSES (insn) != 1)) return 0; @@ -1850,11 +1879,11 @@ own_thread_p (rtx thread, rtx label, int allow_fallthrough) /* Ensure that we reach a BARRIER before any insn or label. */ for (insn = prev_nonnote_insn (thread); - insn == 0 || GET_CODE (insn) != BARRIER; + insn == 0 || !BARRIER_P (insn); insn = prev_nonnote_insn (insn)) if (insn == 0 - || GET_CODE (insn) == CODE_LABEL - || (GET_CODE (insn) == INSN + || LABEL_P (insn) + || (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)) return 0; @@ -1920,7 +1949,7 @@ update_reg_dead_notes (rtx insn, rtx delayed_insn) next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_DEAD - || GET_CODE (XEXP (link, 0)) != REG) + || !REG_P (XEXP (link, 0))) continue; if (reg_referenced_p (XEXP (link, 0), PATTERN (insn))) @@ -1953,7 +1982,7 @@ fix_reg_dead_note (rtx start_insn, rtx stop_insn) next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_DEAD - || GET_CODE (XEXP (link, 0)) != REG) + || !REG_P (XEXP (link, 0))) continue; if (reg_set_p (XEXP (link, 0), PATTERN (start_insn))) @@ -1981,7 +2010,7 @@ update_reg_unused_notes (rtx insn, rtx redundant_insn) next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_UNUSED - || GET_CODE (XEXP (link, 0)) != REG) + || !REG_P (XEXP (link, 0))) continue; if (! find_regno_note (redundant_insn, REG_UNUSED, @@ -2023,10 +2052,10 @@ fill_simple_delay_slots (int non_jumps_p) insn = unfilled_slots_base[i]; if (insn == 0 || INSN_DELETED_P (insn) - || (GET_CODE (insn) == INSN + || (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE) - || (GET_CODE (insn) == JUMP_INSN && non_jumps_p) - || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p)) + || (JUMP_P (insn) && non_jumps_p) + || (!JUMP_P (insn) && ! 
non_jumps_p)) continue; /* It may have been that this insn used to need delay slots, but @@ -2070,13 +2099,13 @@ fill_simple_delay_slots (int non_jumps_p) slots_filled = 0; delay_list = 0; - if (GET_CODE (insn) == JUMP_INSN) + if (JUMP_P (insn)) flags = get_jump_flags (insn, JUMP_LABEL (insn)); else flags = get_jump_flags (insn, NULL_RTX); if ((trial = next_active_insn (insn)) - && GET_CODE (trial) == JUMP_INSN + && JUMP_P (trial) && simplejump_p (trial) && eligible_for_delay (insn, slots_filled, trial, flags) && no_labels_between_p (insn, trial) @@ -2183,7 +2212,7 @@ fill_simple_delay_slots (int non_jumps_p) #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) if (slots_filled != slots_to_fill && delay_list == 0 - && GET_CODE (insn) == JUMP_INSN + && JUMP_P (insn) && (condjump_p (insn) || condjump_in_parallel_p (insn))) { delay_list = optimize_skip (insn); @@ -2227,7 +2256,7 @@ fill_simple_delay_slots (int non_jumps_p) Presumably, we should also check to see if we could get back to this function via `setjmp'. */ && ! can_throw_internal (insn) - && (GET_CODE (insn) != JUMP_INSN + && (!JUMP_P (insn) || ((condjump_p (insn) || condjump_in_parallel_p (insn)) && ! simplejump_p (insn) && JUMP_LABEL (insn) != 0))) @@ -2241,7 +2270,7 @@ fill_simple_delay_slots (int non_jumps_p) CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); - if (GET_CODE (insn) == CALL_INSN) + if (CALL_P (insn)) { mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (insn, &needed, 1); @@ -2251,7 +2280,7 @@ fill_simple_delay_slots (int non_jumps_p) { mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (insn, &needed, 1); - if (GET_CODE (insn) == JUMP_INSN) + if (JUMP_P (insn)) target = JUMP_LABEL (insn); } @@ -2260,8 +2289,8 @@ fill_simple_delay_slots (int non_jumps_p) { next_trial = next_nonnote_insn (trial); - if (GET_CODE (trial) == CODE_LABEL - || GET_CODE (trial) == BARRIER) + if (LABEL_P (trial) + || BARRIER_P (trial)) break; /* We must have an INSN, JUMP_INSN, or CALL_INSN. */ @@ -2279,7 +2308,7 @@ fill_simple_delay_slots (int non_jumps_p) trial_delay = trial; /* Stop our search when seeing an unconditional jump. */ - if (GET_CODE (trial_delay) == JUMP_INSN) + if (JUMP_P (trial_delay)) break; /* See if we have a resource problem before we try to @@ -2319,8 +2348,8 @@ fill_simple_delay_slots (int non_jumps_p) set.cc = 1; /* If this is a call or jump, we might not get here. */ - if (GET_CODE (trial_delay) == CALL_INSN - || GET_CODE (trial_delay) == JUMP_INSN) + if (CALL_P (trial_delay) + || JUMP_P (trial_delay)) maybe_never = 1; } @@ -2331,13 +2360,13 @@ fill_simple_delay_slots (int non_jumps_p) Don't do this if the insn at the branch target is a branch. */ if (slots_to_fill != slots_filled && trial - && GET_CODE (trial) == JUMP_INSN + && JUMP_P (trial) && simplejump_p (trial) && (target == 0 || JUMP_LABEL (trial) == target) && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0 - && ! (GET_CODE (next_trial) == INSN + && ! (NONJUMP_INSN_P (next_trial) && GET_CODE (PATTERN (next_trial)) == SEQUENCE) - && GET_CODE (next_trial) != JUMP_INSN + && !JUMP_P (next_trial) && ! insn_references_resource_p (next_trial, &set, 1) && ! insn_sets_resource_p (next_trial, &set, 1) && ! 
insn_sets_resource_p (next_trial, &needed, 1) @@ -2358,21 +2387,24 @@ fill_simple_delay_slots (int non_jumps_p) else new_label = find_end_label (); - delay_list - = add_to_delay_list (copy_rtx (next_trial), delay_list); - slots_filled++; - reorg_redirect_jump (trial, new_label); - - /* If we merged because we both jumped to the same place, - redirect the original insn also. */ - if (target) - reorg_redirect_jump (insn, new_label); + if (new_label) + { + delay_list + = add_to_delay_list (copy_rtx (next_trial), delay_list); + slots_filled++; + reorg_redirect_jump (trial, new_label); + + /* If we merged because we both jumped to the same place, + redirect the original insn also. */ + if (target) + reorg_redirect_jump (insn, new_label); + } } } /* If this is an unconditional jump, then try to get insns from the target of the jump. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && simplejump_p (insn) && slots_filled != slots_to_fill) delay_list @@ -2438,7 +2470,7 @@ fill_simple_delay_slots (int non_jumps_p) for (trial = get_last_insn (); ! stop_search_p (trial, 1); trial = PREV_INSN (trial)) { - if (GET_CODE (trial) == NOTE) + if (NOTE_P (trial)) continue; pat = PATTERN (trial); if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) @@ -2521,9 +2553,8 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, int flags; /* Validate our arguments. */ - if ((condition == const_true_rtx && ! thread_if_true) - || (! own_thread && ! thread_if_true)) - abort (); + gcc_assert(condition != const_true_rtx || thread_if_true); + gcc_assert(own_thread || thread_if_true); flags = get_jump_flags (insn, JUMP_LABEL (insn)); @@ -2566,7 +2597,7 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, rtx pat, old_trial; /* If we have passed a label, we no longer own this thread. */ - if (GET_CODE (trial) == CODE_LABEL) + if (LABEL_P (trial)) { own_thread = 0; continue; @@ -2687,12 +2718,12 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, label lest it be deleted by delete_related_insns. */ note = find_reg_note (trial, REG_LABEL, 0); /* REG_LABEL could be NOTE_INSN_DELETED_LABEL too. */ - if (note && GET_CODE (XEXP (note, 0)) == CODE_LABEL) + if (note && LABEL_P (XEXP (note, 0))) LABEL_NUSES (XEXP (note, 0))++; delete_related_insns (trial); - if (note && GET_CODE (XEXP (note, 0)) == CODE_LABEL) + if (note && LABEL_P (XEXP (note, 0))) LABEL_NUSES (XEXP (note, 0))--; } else @@ -2759,14 +2790,14 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, a PRE_INC. We also can't do this if there's overlap of source and destination. Overlap may happen for larger-than-register-size modes. */ - if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET - && GET_CODE (SET_SRC (pat)) == REG - && GET_CODE (SET_DEST (pat)) == REG + if (NONJUMP_INSN_P (trial) && GET_CODE (pat) == SET + && REG_P (SET_SRC (pat)) + && REG_P (SET_DEST (pat)) && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat))) { rtx next = next_nonnote_insn (trial); - if (next && GET_CODE (next) == INSN + if (next && NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) != USE && ! reg_set_p (SET_DEST (pat), next) && ! reg_set_p (SET_SRC (pat), next) @@ -2778,9 +2809,9 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, /* If we stopped on a branch insn that has delay slots, see if we can steal some of the insns in those slots. 
*/ - if (trial && GET_CODE (trial) == INSN + if (trial && NONJUMP_INSN_P (trial) && GET_CODE (PATTERN (trial)) == SEQUENCE - && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN) + && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))) { /* If this is the `true' thread, we will want to follow the jump, so we can only do this if we have taken everything up to here. */ @@ -2813,7 +2844,7 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, arithmetic insn after the jump insn and put the arithmetic insn in the delay slot. If we can't do this, return. */ if (delay_list == 0 && likely && new_thread - && GET_CODE (new_thread) == INSN + && NONJUMP_INSN_P (new_thread) && GET_CODE (PATTERN (new_thread)) != ASM_INPUT && asm_noperands (PATTERN (new_thread)) < 0) { @@ -2824,7 +2855,7 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, trial = new_thread; pat = PATTERN (trial); - if (GET_CODE (trial) != INSN + if (!NONJUMP_INSN_P (trial) || GET_CODE (pat) != SET || ! eligible_for_delay (insn, 0, trial, flags) || can_throw_internal (trial)) @@ -2893,10 +2924,9 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, { rtx label; - if (! thread_if_true) - abort (); + gcc_assert (thread_if_true); - if (new_thread && GET_CODE (new_thread) == JUMP_INSN + if (new_thread && JUMP_P (new_thread) && (simplejump_p (new_thread) || GET_CODE (PATTERN (new_thread)) == RETURN) && redirect_with_delay_list_safe_p (insn, @@ -2906,12 +2936,13 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, if (new_thread == 0) label = find_end_label (); - else if (GET_CODE (new_thread) == CODE_LABEL) + else if (LABEL_P (new_thread)) label = new_thread; else label = get_label_before (new_thread); - reorg_redirect_jump (insn, label); + if (label) + reorg_redirect_jump (insn, label); } return delay_list; @@ -2946,7 +2977,7 @@ fill_eager_delay_slots (void) insn = unfilled_slots_base[i]; if (insn == 0 || INSN_DELETED_P (insn) - || GET_CODE (insn) != JUMP_INSN + || !JUMP_P (insn) || ! (condjump_p (insn) || condjump_in_parallel_p (insn))) continue; @@ -3071,33 +3102,30 @@ relax_delay_slots (rtx first) /* If this is a jump insn, see if it now jumps to a jump, jumps to the next insn, or jumps to a label that is not the last of a group of consecutive labels. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (condjump_p (insn) || condjump_in_parallel_p (insn)) && (target_label = JUMP_LABEL (insn)) != 0) { - target_label = follow_jumps (target_label); - /* See comment further down why we must use next_real_insn here, - instead of next_active_insn. */ - target_label = prev_label (next_real_insn (target_label)); - + target_label = skip_consecutive_labels (follow_jumps (target_label)); if (target_label == 0) target_label = find_end_label (); - if (next_active_insn (target_label) == next + if (target_label && next_active_insn (target_label) == next && ! condjump_in_parallel_p (insn)) { delete_jump (insn); continue; } - if (target_label != JUMP_LABEL (insn)) + if (target_label && target_label != JUMP_LABEL (insn)) reorg_redirect_jump (insn, target_label); /* See if this jump branches around an unconditional jump. If so, invert this jump and point it to the target of the second jump. 
*/ - if (next && GET_CODE (next) == JUMP_INSN + if (next && JUMP_P (next) && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) + && target_label && next_active_insn (target_label) == next_active_insn (next) && no_labels_between_p (insn, next)) { @@ -3138,7 +3166,7 @@ relax_delay_slots (rtx first) Don't do this if we expect the conditional branch to be true, because we would then be making the more common case longer. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN) && (other = prev_active_insn (insn)) != 0 && (condjump_p (other) || condjump_in_parallel_p (other)) @@ -3155,7 +3183,7 @@ relax_delay_slots (rtx first) } /* Now look only at cases where we have filled a delay slot. */ - if (GET_CODE (insn) != INSN + if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE) continue; @@ -3182,7 +3210,7 @@ relax_delay_slots (rtx first) if (optimize_size && GET_CODE (PATTERN (delay_insn)) == RETURN && next - && GET_CODE (next) == JUMP_INSN + && JUMP_P (next) && GET_CODE (PATTERN (next)) == RETURN) { rtx after; @@ -3202,8 +3230,7 @@ relax_delay_slots (rtx first) trial = PREV_INSN (insn); delete_related_insns (insn); - if (GET_CODE (pat) != SEQUENCE) - abort (); + gcc_assert (GET_CODE (pat) == SEQUENCE); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { @@ -3216,7 +3243,7 @@ relax_delay_slots (rtx first) } /* Now look only at the cases where we have a filled JUMP_INSN. */ - if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN + if (!JUMP_P (XVECEXP (PATTERN (insn), 0, 0)) || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0)) || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0)))) continue; @@ -3227,17 +3254,11 @@ relax_delay_slots (rtx first) { /* If this jump goes to another unconditional jump, thread it, but don't convert a jump into a RETURN here. */ - trial = follow_jumps (target_label); - /* We use next_real_insn instead of next_active_insn, so that - the special USE insns emitted by reorg won't be ignored. - If they are ignored, then they will get deleted if target_label - is now unreachable, and that would cause mark_target_live_regs - to fail. */ - trial = prev_label (next_real_insn (trial)); - if (trial == 0 && target_label != 0) + trial = skip_consecutive_labels (follow_jumps (target_label)); + if (trial == 0) trial = find_end_label (); - if (trial != target_label + if (trial && trial != target_label && redirect_with_delay_slots_safe_p (delay_insn, trial, insn)) { reorg_redirect_jump (delay_insn, trial); @@ -3251,30 +3272,31 @@ relax_delay_slots (rtx first) && redundant_insn (trial, insn, 0) && ! can_throw_internal (trial)) { - rtx tmp; - /* Figure out where to emit the special USE insn so we don't later incorrectly compute register live/death info. */ - tmp = next_active_insn (trial); + rtx tmp = next_active_insn (trial); if (tmp == 0) tmp = find_end_label (); - /* Insert the special USE insn and update dataflow info. */ - update_block (trial, tmp); + if (tmp) + { + /* Insert the special USE insn and update dataflow info. */ + update_block (trial, tmp); - /* Now emit a label before the special USE insn, and - redirect our jump to the new label. */ - target_label = get_label_before (PREV_INSN (tmp)); - reorg_redirect_jump (delay_insn, target_label); - next = insn; - continue; + /* Now emit a label before the special USE insn, and + redirect our jump to the new label. 
*/ + target_label = get_label_before (PREV_INSN (tmp)); + reorg_redirect_jump (delay_insn, target_label); + next = insn; + continue; + } } /* Similarly, if it is an unconditional jump with one insn in its delay list and that insn is redundant, thread the jump. */ if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE && XVECLEN (PATTERN (trial), 0) == 2 - && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN + && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)) && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0)) || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN) && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0)) @@ -3283,8 +3305,9 @@ relax_delay_slots (rtx first) if (target_label == 0) target_label = find_end_label (); - if (redirect_with_delay_slots_safe_p (delay_insn, target_label, - insn)) + if (target_label + && redirect_with_delay_slots_safe_p (delay_insn, target_label, + insn)) { reorg_redirect_jump (delay_insn, target_label); next = insn; @@ -3325,8 +3348,7 @@ relax_delay_slots (rtx first) trial = PREV_INSN (insn); delete_related_insns (insn); - if (GET_CODE (pat) != SEQUENCE) - abort (); + gcc_assert (GET_CODE (pat) == SEQUENCE); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { @@ -3341,7 +3363,7 @@ relax_delay_slots (rtx first) /* See if this is an unconditional jump around a single insn which is identical to the one in its delay slot. In this case, we can just delete the branch and the insn in its delay slot. */ - if (next && GET_CODE (next) == INSN + if (next && NONJUMP_INSN_P (next) && prev_label (next_active_insn (next)) == target_label && simplejump_p (insn) && XVECLEN (pat, 0) == 2 @@ -3357,7 +3379,8 @@ relax_delay_slots (rtx first) annulled jumps, though. Again, don't convert a jump to a RETURN here. */ if (! INSN_ANNULLED_BRANCH_P (delay_insn) - && next && GET_CODE (next) == JUMP_INSN + && any_condjump_p (delay_insn) + && next && JUMP_P (next) && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) && next_active_insn (target_label) == next_active_insn (next) && no_labels_between_p (insn, next)) @@ -3369,7 +3392,8 @@ relax_delay_slots (rtx first) label = find_end_label (); /* find_end_label can generate a new label. Check this first. */ - if (no_labels_between_p (insn, next) + if (label + && no_labels_between_p (insn, next) && redirect_with_delay_slots_safe_p (delay_insn, label, insn)) { /* Be careful how we do this to avoid deleting code or labels @@ -3444,7 +3468,7 @@ make_return_insns (rtx first) made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change into a RETURN to jump to it. */ for (insn = first; insn; insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN) + if (JUMP_P (insn) && GET_CODE (PATTERN (insn)) == RETURN) { real_return_label = get_label_before (insn); break; @@ -3463,9 +3487,9 @@ make_return_insns (rtx first) /* Only look at filled JUMP_INSNs that go to the end of function label. */ - if (GET_CODE (insn) != INSN + if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE - || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN + || !JUMP_P (XVECEXP (PATTERN (insn), 0, 0)) || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label) continue; @@ -3558,17 +3582,6 @@ dbr_schedule (rtx first, FILE *file) { rtx insn, next, epilogue_insn = 0; int i; -#if 0 - int old_flag_no_peephole = flag_no_peephole; - - /* Execute `final' once in prescan mode to delete any insns that won't be - used. 
Don't let final try to do any peephole optimization--it will - ruin dataflow information for this pass. */ - - flag_no_peephole = 1; - final (first, 0, NO_DEBUG, 1, 1); - flag_no_peephole = old_flag_no_peephole; -#endif /* If the current function has no insns other than the prologue and epilogue, then do not try to fill any delay slots. */ @@ -3581,7 +3594,7 @@ dbr_schedule (rtx first, FILE *file) { if (INSN_UID (insn) > max_uid) max_uid = INSN_UID (insn); - if (GET_CODE (insn) == NOTE + if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG) epilogue_insn = insn; } @@ -3605,7 +3618,7 @@ dbr_schedule (rtx first, FILE *file) INSN_FROM_TARGET_P (insn) = 0; /* Skip vector tables. We can't get attributes for them. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) continue; @@ -3614,10 +3627,10 @@ dbr_schedule (rtx first, FILE *file) obstack_ptr_grow (&unfilled_slots_obstack, insn); /* Ensure all jumps go to the last of a set of consecutive labels. */ - if (GET_CODE (insn) == JUMP_INSN + if (JUMP_P (insn) && (condjump_p (insn) || condjump_in_parallel_p (insn)) && JUMP_LABEL (insn) != 0 - && ((target = prev_label (next_active_insn (JUMP_LABEL (insn)))) + && ((target = skip_consecutive_labels (JUMP_LABEL (insn))) != JUMP_LABEL (insn))) redirect_jump (insn, target, 1); } @@ -3650,7 +3663,7 @@ dbr_schedule (rtx first, FILE *file) { next = NEXT_INSN (insn); - if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE + if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE && INSN_P (XEXP (PATTERN (insn), 0))) next = delete_related_insns (insn); } @@ -3707,7 +3720,7 @@ dbr_schedule (rtx first, FILE *file) for (insn = first; insn; insn = NEXT_INSN (insn)) { if (! INSN_DELETED_P (insn) - && GET_CODE (insn) == INSN + && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) { @@ -3765,14 +3778,14 @@ dbr_schedule (rtx first, FILE *file) { int pred_flags; - if (GET_CODE (insn) == INSN) + if (NONJUMP_INSN_P (insn)) { rtx pat = PATTERN (insn); if (GET_CODE (pat) == SEQUENCE) insn = XVECEXP (pat, 0, 0); } - if (GET_CODE (insn) != JUMP_INSN) + if (!JUMP_P (insn)) continue; pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
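
Note on the predicate-macro conversion: most hunks above mechanically replace
open-coded rtx-code tests such as GET_CODE (insn) == JUMP_INSN with the rtl.h
predicate macros JUMP_P, NOTE_P, BARRIER_P, LABEL_P, CALL_P, NONJUMP_INSN_P
and REG_P.  The standalone sketch below illustrates the equivalence; the enum
values, struct layout and macro bodies are simplified stand-ins for
illustration only, not GCC's actual rtl.h definitions.

/* predicate_macros.c -- minimal model of the GET_CODE -> *_P rewrite.  */
#include <stdio.h>

enum rtx_code { NOTE, BARRIER, CODE_LABEL, INSN, JUMP_INSN, CALL_INSN };

struct rtx_def { enum rtx_code code; };

#define GET_CODE(X)        ((X)->code)

/* Predicate macros in the style the patch adopts.  */
#define NOTE_P(X)          (GET_CODE (X) == NOTE)
#define BARRIER_P(X)       (GET_CODE (X) == BARRIER)
#define LABEL_P(X)         (GET_CODE (X) == CODE_LABEL)
#define JUMP_P(X)          (GET_CODE (X) == JUMP_INSN)
#define CALL_P(X)          (GET_CODE (X) == CALL_INSN)
#define NONJUMP_INSN_P(X)  (GET_CODE (X) == INSN)

int
main (void)
{
  struct rtx_def jump = { JUMP_INSN };
  struct rtx_def note = { NOTE };

  /* The two spellings are equivalent; the macro form is what the
     patch substitutes throughout reorg.c.  */
  printf ("jump: old=%d new=%d\n",
          GET_CODE (&jump) == JUMP_INSN, JUMP_P (&jump));
  printf ("note: old=%d new=%d\n",
          GET_CODE (&note) == NOTE, NOTE_P (&note));
  return 0;
}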
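
Note on the assertion-style conversion: the patch likewise replaces abort ()
with gcc_unreachable (), and if (!cond) abort () with gcc_assert (cond), so
each site names its intent.  The fallback definitions below are simplified
stand-ins assumed for illustration; GCC's real macros live in system.h and
report file and line through fancy_abort.

/* assert_style.c -- minimal model of abort () -> gcc_unreachable ()
   and if (!cond) abort () -> gcc_assert (cond).  */
#include <stdio.h>
#include <stdlib.h>

#define gcc_unreachable() \
  (fprintf (stderr, "unreachable at %s:%d\n", __FILE__, __LINE__), abort ())

#define gcc_assert(EXPR) \
  ((EXPR) ? (void) 0 \
          : (fprintf (stderr, "assertion failed: %s\n", #EXPR), abort ()))

static int
classify (int code)
{
  switch (code)
    {
    case 0:
      return 1;
    case 1:
      return 2;
    default:
      /* Formerly: abort ();  */
      gcc_unreachable ();
    }
}

int
main (void)
{
  /* Formerly: if (classify (0) != 1) abort ();  */
  gcc_assert (classify (0) == 1);
  printf ("ok\n");
  return 0;
}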
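
Note on the find_end_label change: the substantive behavioral change in this
patch is that find_end_label may now fail and return NULL_RTX (for example
in the new HAVE_epilogue && ! HAVE_return path above), so every caller is
updated to test the returned label before redirecting a jump to it.  The
sketch below models only that caller contract; the types and helpers
(rtx_label, find_end_label_sketch, redirect_jump_sketch) are invented
stand-ins, not GCC's API.

/* end_label_contract.c -- minimal model of the "check the label before
   redirecting" pattern introduced above.  */
#include <stdio.h>

typedef struct label_rtx { int uid; } *rtx_label;

/* Stand-in for find_end_label after this patch: it may legitimately
   fail, e.g. when an epilogue exists and no new RETURN insn can be
   emitted, so callers must be prepared for a null result.  */
static rtx_label
find_end_label_sketch (int can_make_label)
{
  static struct label_rtx end_label = { 42 };
  return can_make_label ? &end_label : NULL;
}

/* Stand-in for reorg_redirect_jump: requires a non-null label.  */
static void
redirect_jump_sketch (rtx_label target)
{
  printf ("redirecting jump to label %d\n", target->uid);
}

int
main (void)
{
  /* Caller pattern after the patch: redirect only if a label was
     actually obtained, otherwise leave the jump alone.  */
  rtx_label target = find_end_label_sketch (0);
  if (target)
    redirect_jump_sketch (target);
  else
    printf ("no end-of-function label; jump left unchanged\n");
  return 0;
}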