get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
- rtx cond;
+ rtx src;
if (pat == 0)
return 0;
+
if (GET_CODE (pat) == COND_EXEC)
return COND_EXEC_TEST (pat);
- if (GET_CODE (insn) != JUMP_INSN)
- return 0;
- if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
- return 0;
- if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
- return 0;
- pat = SET_DEST (pat);
- cond = XEXP (pat, 0);
- if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
- && XEXP (cond, 2) == pc_rtx)
- return cond;
- else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
- && XEXP (cond, 1) == pc_rtx)
- return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
- XEXP (cond, 0), XEXP (cond, 1));
- else
+
+ if (!any_condjump_p (insn) || !onlyjump_p (insn))
return 0;
+
+ src = SET_SRC (pc_set (insn));
+#if 0
+ /* The previous code here was completely invalid and could never extract
+ the condition from a jump. This code does the correct thing, but that
+ triggers latent bugs later in the scheduler on ports with conditional
+ execution. So this is disabled for now. */
+ if (XEXP (src, 2) == pc_rtx)
+ return XEXP (src, 0);
+ else if (XEXP (src, 1) == pc_rtx)
+ {
+ rtx cond = XEXP (src, 0);
+ enum rtx_code revcode = reversed_comparison_code (cond, insn);
+
+ if (revcode == UNKNOWN)
+ return 0;
+ return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+ }
+#endif
+
+ return 0;
}
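
A minimal usage sketch (not part of the patch), assuming the rtl API visible in this file; insn_is_predicated_p is a hypothetical helper, shown only to illustrate what a caller can and cannot learn from get_condition while the #if 0 block above stays disabled:

static int
insn_is_predicated_p (rtx insn)
{
  rtx cond = get_condition (insn);

  /* With the jump handling above disabled, a nonzero result can only
     come from a COND_EXEC pattern; zero means the insn is either
     unconditional or a conditional jump we declined to analyze.  */
  return cond != 0;
}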
/* Return nonzero if conditions COND1 and COND2 can never be both true. */
static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
if (COMPARISON_P (cond1)
&& COMPARISON_P (cond2)
- && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
+ && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
&& XEXP (cond1, 0) == XEXP (cond2, 0)
&& XEXP (cond1, 1) == XEXP (cond2, 1))
return 1;
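
A worked example (assumed, not from the patch) of a pair this test recognizes:

  /* Two insns predicated on opposite tests of the same register, as a
     conditional-execution port might emit them:

       (cond_exec (eq (reg:CC 17) (const_int 0)) (set (reg:SI 0) ...))
       (cond_exec (ne (reg:CC 17) (const_int 0)) (set (reg:SI 1) ...))

     reversed_comparison_code maps NE back to EQ, so the codes match,
     and the operand checks pass provided the operand rtxes are shared:
     the comparison is pointer equality, not rtx_equal_p, so structurally
     identical but distinct operands would not be recognized.  */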
/* We can get a dependency on deleted insns due to optimizations in
the register allocation and reloading or due to splitting. Any
such dependency is useless and can be ignored. */
- if (GET_CODE (elem) == NOTE)
+ if (NOTE_P (elem))
return 0;
/* flow.c doesn't handle conditional lifetimes entirely correctly;
   calls mess up the conditional lifetimes.  */
/* ??? add_dependence is the wrong place to be eliding dependencies,
as that forgets that the condition expressions themselves may
be dependent. */
- if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
+ if (!CALL_P (insn) && !CALL_P (elem))
{
cond1 = get_condition (insn);
cond2 = get_condition (elem);
No need for interblock dependences with calls, since
calls are not moved between blocks. Note: the edge where
elem is a CALL is still required. */
- if (GET_CODE (insn) == CALL_INSN
+ if (CALL_P (insn)
&& (INSN_BB (elem) != INSN_BB (insn)))
return 0;
#endif
dest = XEXP (dest, 0);
}
- if (GET_CODE (dest) == REG)
+ if (REG_P (dest))
{
regno = REGNO (dest);
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
- if (GET_CODE (t) == MEM)
+ if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
}
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* Writing memory. */
rtx t = dest;
if (!reload_completed && get_reg_known_equiv_p (regno))
{
rtx t = get_reg_known_value (regno);
- if (GET_CODE (t) == MEM)
+ if (MEM_P (t))
sched_analyze_2 (deps, XEXP (t, 0), insn);
}
}
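
An assumed illustration (not from the patch) of why the equivalent address is analyzed here:

  /* If pseudo 100 carries REG_EQUIV (mem (plus (reg sp) (const_int 16))),
     reload may later substitute that memory reference for the pseudo.
     Any insn mentioning reg 100 must therefore also depend on whatever
     can modify the address; analyzing XEXP (t, 0) -- the address of the
     known-equivalent MEM -- records exactly those dependencies.  */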
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
+ if (!JUMP_P (XEXP (u, 0))
|| deps_may_trap_p (x))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
sched_analyze_2 (deps, x, insn);
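
The list walk above follows the usual INSN_LIST convention. A standalone sketch of the idiom; count_list_insns is a hypothetical helper, shown only to illustrate the traversal:

static int
count_list_insns (rtx list)
{
  int n = 0;
  rtx u;

  /* Each node holds the listed insn in XEXP (u, 0) and the next node in
     XEXP (u, 1); a null rtx terminates the list.  */
  for (u = list; u; u = XEXP (u, 1))
    if (INSN_P (XEXP (u, 0)))
      n++;
  return n;
}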
/* Mark registers CLOBBERED or used by called function. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
{
reg_pending_barrier = MOVE_BARRIER;
}
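
The loop above walks CALL_INSN_FUNCTION_USAGE; its body is elided from this hunk. A sketch of the general shape of that list (count_call_clobbers is hypothetical, for illustration only):

static int
count_call_clobbers (rtx call_insn)
{
  rtx link;
  int n = 0;

  /* Each link is an EXPR_LIST; XEXP (link, 0) is a USE or CLOBBER of an
     operand involved in the call, most often a hard register.  */
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn); link; link = XEXP (link, 1))
    if (GET_CODE (XEXP (link, 0)) == CLOBBER
	&& REG_P (XEXP (XEXP (link, 0), 0)))
      n++;
  return n;
}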
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
{
rtx next;
next = next_nonnote_insn (insn);
- if (next && GET_CODE (next) == BARRIER)
+ if (next && BARRIER_P (next))
reg_pending_barrier = TRUE_BARRIER;
else
{
tmp = SET_DEST (set);
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
dest_regno = REGNO (tmp);
else
goto end_call_group;
tmp = SUBREG_REG (tmp);
if ((GET_CODE (tmp) == PLUS
|| GET_CODE (tmp) == MINUS)
- && GET_CODE (XEXP (tmp, 0)) == REG
+ && REG_P (XEXP (tmp, 0))
&& REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
&& dest_regno == STACK_POINTER_REGNUM)
src_regno = STACK_POINTER_REGNUM;
- else if (GET_CODE (tmp) == REG)
+ else if (REG_P (tmp))
src_regno = REGNO (tmp);
else
goto end_call_group;
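
An assumed example of an insn that passes the checks above:

  /* (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -16)))
     Both dest_regno and src_regno resolve to STACK_POINTER_REGNUM, so a
     stack-pointer adjustment stays inside the post-call group rather
     than ending it.  */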
/* Before reload, if the previous block ended in a call, show that
we are inside a post-call group, so as to keep the lifetimes of
hard registers correct. */
- if (! reload_completed && GET_CODE (head) != CODE_LABEL)
+ if (! reload_completed && !LABEL_P (head))
{
insn = prev_nonnote_insn (head);
- if (insn && GET_CODE (insn) == CALL_INSN)
+ if (insn && CALL_P (insn))
deps->in_post_call_group_p = post_call_initial;
}
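
A brief illustration (assumed, not from the patch) of the situation being detected:

  /* Before reload, a block may begin with the copy that moves a call's
     return value out of the hard return register, e.g.

       (call_insn ...)                   ; last insn of the previous block
       (set (reg:SI 100) (reg:SI 0))     ; head of this block

     Starting the analysis in post_call_initial keeps such hard-register
     lifetimes tied to the call they belong to.  */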
for (insn = head;; insn = NEXT_INSN (insn))
{
rtx link, end_seq, r0, set;
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
{
/* Clear out the stale LOG_LINKS from flow. */
free_INSN_LIST_list (&LOG_LINKS (insn));
/* Make each JUMP_INSN a scheduling barrier for memory
references. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
{
/* Keep the list a reasonable size. */
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
loop_notes = 0;
}
- else if (GET_CODE (insn) == CALL_INSN)
+ else if (CALL_P (insn))
{
int i;
/* See comments on reemit_notes as to why we do this.
??? Actually, the reemit_notes just say what is done, not why. */
- if (GET_CODE (insn) == NOTE
+ if (NOTE_P (insn)
&& (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
the outermost libcall sequence. */
&& deps->libcall_block_tail_insn == 0
/* The sequence must start with a clobber of a register. */
- && GET_CODE (insn) == INSN
+ && NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == CLOBBER
- && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
- && GET_CODE (XEXP (PATTERN (insn), 0)) == REG
+ && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
+ && REG_P (XEXP (PATTERN (insn), 0))
/* The CLOBBER must also have a REG_LIBCALL note attached. */
&& (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
&& (end_seq = XEXP (link, 0)) != 0
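
For reference, an assumed sketch of the libcall shape this condition chain recognizes (the rest of the chain is not shown in this hunk):

  /* A libcall block typically looks like

       (insn ... (clobber (reg:SI 0)))           ; REG_LIBCALL -> last insn
       ... insns computing the value ...
       (insn ... (set (reg:SI 100) (reg:SI 0)))  ; REG_RETVAL -> first insn

     The head must be a plain INSN whose entire pattern is a CLOBBER of a
     register and which carries a REG_LIBCALL note; end_seq, taken from
     that note, is the final insn of the sequence.  */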
However, if we have enabled checking we might as well go
ahead and verify that add_dependence worked properly. */
- if (GET_CODE (from) == NOTE
+ if (NOTE_P (from)
|| INSN_DELETED_P (from)
|| (forward_dependency_cache != NULL
&& bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],