static rtx find_end_label PROTO((void));
static rtx emit_delay_sequence PROTO((rtx, rtx, int));
static rtx add_to_delay_list PROTO((rtx, rtx));
-static void delete_from_delay_slot PROTO((rtx));
+static rtx delete_from_delay_slot PROTO((rtx));
static void delete_scheduled_jump PROTO((rtx));
static void note_delay_statistics PROTO((int, int));
static rtx optimize_skip PROTO((rtx));
/* Delete INSN from the delay slot of the insn that it is in. This may
produce an insn without anything in its delay slots. */
-static void
+static rtx
delete_from_delay_slot (insn)
rtx insn;
{
/* Show we need to fill this insn again. */
obstack_ptr_grow (&unfilled_slots_obstack, trial);
+
+ return trial;
}
\f
/* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down
return (li == NULL);
}
+/* DELAY_LIST is a list of insns that have already been placed into delay
+   slots.  See if all of them have the same annulling status as ANNUL_TRUE_P.
+   If not, return 0; otherwise return 1.
+
+   An empty DELAY_LIST trivially satisfies the check, so we return 1 for
+   it without entering the loop.  */
+
+static int
+check_annul_list_true_false (annul_true_p, delay_list)
+     int annul_true_p;
+     rtx delay_list;
+{
+  rtx temp;
+
+  if (delay_list)
+    {
+      for (temp = delay_list; temp; temp = XEXP (temp, 1))
+	{
+	  rtx trial = XEXP (temp, 0);
+
+	  /* Reject the whole list as soon as one insn's
+	     INSN_FROM_TARGET_P flag disagrees with ANNUL_TRUE_P.  */
+	  if ((annul_true_p && INSN_FROM_TARGET_P (trial))
+	      || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
+	    return 0;
+	}
+    }
+  return 1;
+}
+
+
\f
/* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
the condition tested by INSN is CONDITION and the resources shown in
|| (! insn_sets_resource_p (trial, other_needed, 0)
&& ! may_trap_p (PATTERN (trial)))))
? eligible_for_delay (insn, total_slots_filled, trial, flags)
- : (must_annul = 1,
- eligible_for_annul_false (insn, total_slots_filled, trial, flags)))
+ : (must_annul || (delay_list == NULL && new_delay_list == NULL))
+ && (must_annul = 1,
+ check_annul_list_true_false (0, delay_list)
+ && check_annul_list_true_false (0, new_delay_list)
+ && eligible_for_annul_false (insn, total_slots_filled,
+ trial, flags)))
{
+ if (must_annul)
+ used_annul = 1;
temp = copy_rtx (trial);
INSN_FROM_TARGET_P (temp) = 1;
new_delay_list = add_to_delay_list (temp, new_delay_list);
/* Add any new insns to the delay list and update the count of the
number of slots filled. */
*pslots_filled = total_slots_filled;
- *pannul_p = must_annul;
+ if (used_annul)
+ *pannul_p = 1;
if (delay_list == 0)
return new_delay_list;
{
int i;
int flags;
+ int must_annul = *pannul_p;
+ int used_annul = 0;
flags = get_jump_flags (insn, JUMP_LABEL (insn));
continue;
}
- if (! *pannul_p
+ if (! must_annul
&& ((condition == const_true_rtx
|| (! insn_sets_resource_p (trial, other_needed, 0)
&& ! may_trap_p (PATTERN (trial)))))
? eligible_for_delay (insn, *pslots_filled, trial, flags)
- : (*pannul_p = 1,
- eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ : (must_annul || delay_list == NULL) && (must_annul = 1,
+ check_annul_list_true_false (1, delay_list)
+ && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
+ if (must_annul)
+ used_annul = 1;
delete_from_delay_slot (trial);
delay_list = add_to_delay_list (trial, delay_list);
break;
}
+ if (used_annul)
+ *pannul_p = 1;
return delay_list;
}
+
\f
/* Try merging insns starting at THREAD which match exactly the insns in
INSN's delay list.
CLEAR_RESOURCE (&set);
/* If this is not an annulling branch, take into account anything needed in
- NEXT_TO_MATCH. This prevents two increments from being incorrectly
+ INSN's delay slot. This prevents two increments from being incorrectly
folded into one. If we are annulling, this would be the correct
thing to do. (The alternative, looking at things set in NEXT_TO_MATCH
will essentially disable this optimization. This method is somewhat of
a kludge, but I don't see a better way.) */
if (! annul_p)
- mark_referenced_resources (next_to_match, &needed, 1);
+ for (i = 1 ; i < num_slots ; i++)
+ if (XVECEXP (PATTERN (insn), 0, i))
+ mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed, 1);
for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
{
break;
next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
- if (! annul_p)
- mark_referenced_resources (next_to_match, &needed, 1);
}
mark_set_resources (trial, &set, 0, 1);
{
if (! annul_p)
{
+ rtx new;
+
update_block (dtrial, thread);
- delete_from_delay_slot (dtrial);
+ new = delete_from_delay_slot (dtrial);
+ if (INSN_DELETED_P (thread))
+ thread = new;
INSN_FROM_TARGET_P (next_to_match) = 0;
}
else
next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
}
+ else
+ {
+ /* Keep track of the set/referenced resources for
+ mark_set_resources (dtrial, &set, 0, 1);
+ mark_referenced_resources (dtrial, &needed, 1);
+ }
}
}
{
if (GET_MODE (merged_insns) == SImode)
{
+ rtx new;
+
update_block (XEXP (merged_insns, 0), thread);
- delete_from_delay_slot (XEXP (merged_insns, 0));
+ new = delete_from_delay_slot (XEXP (merged_insns, 0));
+ if (INSN_DELETED_P (thread))
+ thread = new;
}
else
{
/* There are two ways we can win: If TRIAL doesn't set anything
needed at the opposite thread and can't trap, or if it can
go into an annulled delay slot. */
- if (condition == const_true_rtx
- || (! insn_sets_resource_p (trial, &opposite_needed, 1)
- && ! may_trap_p (pat)))
+ if (!must_annul
+ && (condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, &opposite_needed, 1)
+ && ! may_trap_p (pat))))
{
old_trial = trial;
trial = try_split (pat, trial, 0);
if (thread == old_trial)
thread = trial;
pat = PATTERN (trial);
- if ((thread_if_true
- ? eligible_for_annul_false (insn, *pslots_filled, trial, flags)
- : eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ if ((must_annul || delay_list == NULL) && (thread_if_true
+ ? check_annul_list_true_false (0, delay_list)
+ && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
+ : check_annul_list_true_false (1, delay_list)
+ && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
rtx temp;