/* Optimize jump instructions, for GNU compiler.
- Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
+ 1998, 1999, 2000 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "config.h"
#include "system.h"
#include "rtl.h"
+#include "tm_p.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "recog.h"
+#include "function.h"
#include "expr.h"
#include "real.h"
#include "except.h"
static rtx *jump_chain;
-/* List of labels referred to from initializers.
- These can never be deleted. */
-rtx forced_labels;
-
/* Maximum index in jump_chain. */
static int max_jump_chain;
static int cross_jump_death_matters = 0;
-static int init_label_info PROTO((rtx));
-static void delete_barrier_successors PROTO((rtx));
-static void mark_all_labels PROTO((rtx, int));
-static rtx delete_unreferenced_labels PROTO((rtx));
-static void delete_noop_moves PROTO((rtx));
-static int calculate_can_reach_end PROTO((rtx, int, int));
-static int duplicate_loop_exit_test PROTO((rtx));
-static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
-static void do_cross_jump PROTO((rtx, rtx, rtx));
-static int jump_back_p PROTO((rtx, rtx));
-static int tension_vector_labels PROTO((rtx, int));
-static void mark_jump_label PROTO((rtx, rtx, int));
-static void delete_computation PROTO((rtx));
-static void delete_from_jump_chain PROTO((rtx));
-static int delete_labelref_insn PROTO((rtx, rtx, int));
-static void mark_modified_reg PROTO((rtx, rtx));
-static void redirect_tablejump PROTO((rtx, rtx));
-static void jump_optimize_1 PROTO ((rtx, int, int, int, int));
-#ifndef HAVE_cc0
-static rtx find_insert_position PROTO((rtx, rtx));
+static int init_label_info PARAMS ((rtx));
+static void delete_barrier_successors PARAMS ((rtx));
+static void mark_all_labels PARAMS ((rtx, int));
+static rtx delete_unreferenced_labels PARAMS ((rtx));
+static void delete_noop_moves PARAMS ((rtx));
+static int calculate_can_reach_end PARAMS ((rtx, int));
+static int duplicate_loop_exit_test PARAMS ((rtx));
+static void find_cross_jump PARAMS ((rtx, rtx, int, rtx *, rtx *));
+static void do_cross_jump PARAMS ((rtx, rtx, rtx));
+static int jump_back_p PARAMS ((rtx, rtx));
+static int tension_vector_labels PARAMS ((rtx, int));
+static void mark_jump_label PARAMS ((rtx, rtx, int, int));
+static void delete_computation PARAMS ((rtx));
+static void delete_from_jump_chain PARAMS ((rtx));
+static int delete_labelref_insn PARAMS ((rtx, rtx, int));
+static void mark_modified_reg PARAMS ((rtx, rtx, void *));
+static void redirect_tablejump PARAMS ((rtx, rtx));
+static void jump_optimize_1 PARAMS ((rtx, int, int, int, int, int));
+#if ! defined(HAVE_cc0) && ! defined(HAVE_conditional_arithmetic)
+static rtx find_insert_position PARAMS ((rtx, rtx));
#endif
-
+static int returnjump_p_1 PARAMS ((rtx *, void *));
+static void delete_prior_computation PARAMS ((rtx, rtx));
+\f
/* Main external entry point into the jump optimizer. See comments before
jump_optimize_1 for descriptions of the arguments. */
void
int noop_moves;
int after_regscan;
{
- jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0);
+ jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0, 0);
}
/* Alternate entry into the jump optimizer. This entry point only rebuilds
rebuild_jump_labels (f)
rtx f;
{
- jump_optimize_1 (f, 0, 0, 0, 1);
+ jump_optimize_1 (f, 0, 0, 0, 1, 0);
}
+/* Alternate entry into the jump optimizer. Do only trivial optimizations. */
+void
+jump_optimize_minimal (f)
+ rtx f;
+{
+ jump_optimize_1 (f, 0, 0, 0, 0, 1);
+}
\f
/* Delete no-op jumps and optimize jumps to jumps
and jumps around jumps.
just determine whether control drops off the end of the function.
This case occurs when we have -W and not -O.
It works because `delete_insn' checks the value of `optimize'
- and refrains from actually deleting when that is 0. */
+ and refrains from actually deleting when that is 0.
+
+ If MINIMAL is nonzero, then we only perform trivial optimizations:
+
+ * Removal of unreachable code after BARRIERs.
+ * Removal of unreferenced CODE_LABELs.
+ * Removal of a jump to the next instruction.
+ * Removal of a conditional jump followed by an unconditional jump
+ to the same target as the conditional jump.
+ * Simplify a conditional jump around an unconditional jump.
+ * Simplify a jump to a jump.
+ * Delete extraneous line number notes.
+ */
static void
-jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, mark_labels_only)
+jump_optimize_1 (f, cross_jump, noop_moves, after_regscan,
+ mark_labels_only, minimal)
rtx f;
int cross_jump;
int noop_moves;
int after_regscan;
int mark_labels_only;
+ int minimal;
{
register rtx insn, next;
int changed;
if (flag_exceptions && cross_jump)
init_insn_eh_region (f, max_uid);
- delete_barrier_successors (f);
+ if (! mark_labels_only)
+ delete_barrier_successors (f);
/* Leave some extra room for labels and duplicate exit test insns
we make. */
max_jump_chain = max_uid * 14 / 10;
- jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
- bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));
+ jump_chain = (rtx *) xcalloc (max_jump_chain, sizeof (rtx));
mark_all_labels (f, cross_jump);
/* Quit now if we just wanted to rebuild the JUMP_LABEL and REG_LABEL
notes and recompute LABEL_NUSES. */
if (mark_labels_only)
- return;
+ goto end;
- exception_optimize ();
+ if (! minimal)
+ exception_optimize ();
last_insn = delete_unreferenced_labels (f);
- if (!optimize)
- {
- /* CAN_REACH_END is persistent for each function. Once set it should
- not be cleared. This is especially true for the case where we
- delete the NOTE_FUNCTION_END note. CAN_REACH_END is cleared by
- the front-end before compiling each function. */
- if (calculate_can_reach_end (last_insn, 1, 0))
- can_reach_end = 1;
-
- /* Zero the "deleted" flag of all the "deleted" insns. */
- for (insn = f; insn; insn = NEXT_INSN (insn))
- INSN_DELETED_P (insn) = 0;
-
- /* Show that the jump chain is not valid. */
- jump_chain = 0;
- return;
- }
-
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
if (noop_moves)
delete_noop_moves (f);
This helps some of the optimizations below by having less insns
being jumped around. */
- if (! reload_completed && after_regscan)
+ if (optimize && ! reload_completed && after_regscan)
for (insn = f; insn; insn = next)
{
rtx set = single_set (insn);
might arrange to use that reg for real. */
&& REGNO_LAST_NOTE_UID (REGNO (SET_DEST (set))) == INSN_UID (insn)
&& ! side_effects_p (SET_SRC (set))
- && ! find_reg_note (insn, REG_RETVAL, 0))
+ && ! find_reg_note (insn, REG_RETVAL, 0)
+ /* An ADDRESSOF expression can turn into a use of the internal arg
+ pointer, so do not delete the initialization of the internal
+ arg pointer yet. If it is truly dead, flow will delete the
+ initializing insn. */
+ && SET_DEST (set) != current_function_internal_arg_pointer)
delete_insn (insn);
}
for (insn = f; insn; insn = next)
{
rtx reallabelprev;
- rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
+ rtx temp, temp1, temp2 = NULL_RTX, temp3, temp4, temp5, temp6;
rtx nlabel;
int this_is_simplejump, this_is_condjump, reversep = 0;
int this_is_condjump_in_parallel;
-#if 0
- /* If NOT the first iteration, if this is the last jump pass
- (just before final), do the special peephole optimizations.
- Avoiding the first iteration gives ordinary jump opts
- a chance to work before peephole opts. */
-
- if (reload_completed && !first && !flag_no_peephole)
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
- peephole (insn);
-#endif
-
- /* That could have deleted some insns after INSN, so check now
- what the following insn is. */
-
next = NEXT_INSN (insn);
/* See if this is a NOTE_INSN_LOOP_BEG followed by an unconditional
if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
changed |= tension_vector_labels (PATTERN (insn), 1);
+ /* See if this jump goes to another jump and redirect if so. */
+ nlabel = follow_jumps (JUMP_LABEL (insn));
+ if (nlabel != JUMP_LABEL (insn))
+ changed |= redirect_jump (insn, nlabel);
+
+ if (! optimize || minimal)
+ continue;
+
/* If a dispatch table always goes to the same place,
get rid of it and replace the insn that uses it. */
int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
int len = XVECLEN (pat, diff_vec_p);
rtx dispatch = prev_real_insn (insn);
+ rtx set;
for (i = 0; i < len; i++)
if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
!= XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
break;
+
if (i == len
&& dispatch != 0
&& GET_CODE (dispatch) == JUMP_INSN
&& JUMP_LABEL (dispatch) != 0
- /* Don't mess with a casesi insn. */
- && !(GET_CODE (PATTERN (dispatch)) == SET
- && (GET_CODE (SET_SRC (PATTERN (dispatch)))
- == IF_THEN_ELSE))
+ /* Don't mess with a casesi insn.
+ XXX according to the comment before computed_jump_p(),
+ all casesi insns should be a parallel of the jump
+ and a USE of a LABEL_REF. */
+ && ! ((set = single_set (dispatch)) != NULL
+ && (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE))
&& next_real_insn (JUMP_LABEL (dispatch)) == insn)
{
redirect_tablejump (dispatch,
}
}
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
/* If a jump references the end of the function, try to turn
it into a RETURN insn, possibly a conditional one. */
- if (JUMP_LABEL (insn)
+ if (JUMP_LABEL (insn) != 0
&& (next_active_insn (JUMP_LABEL (insn)) == 0
|| GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
== RETURN))
changed |= redirect_jump (insn, NULL_RTX);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
/* Detect jump to following insn. */
- if (reallabelprev == insn && condjump_p (insn))
+ if (reallabelprev == insn && this_is_condjump)
{
next = next_real_insn (JUMP_LABEL (insn));
delete_jump (insn);
continue;
}
+ /* Detect a conditional jump going to the same place
+ as an immediately following unconditional jump. */
+ else if (this_is_condjump
+ && (temp = next_active_insn (insn)) != 0
+ && simplejump_p (temp)
+ && (next_active_insn (JUMP_LABEL (insn))
+ == next_active_insn (JUMP_LABEL (temp))))
+ {
+ /* Don't mess up test coverage analysis. */
+ temp2 = temp;
+ if (flag_test_coverage && !reload_completed)
+ for (temp2 = insn; temp2 != temp; temp2 = NEXT_INSN (temp2))
+ if (GET_CODE (temp2) == NOTE && NOTE_LINE_NUMBER (temp2) > 0)
+ break;
+
+ if (temp2 == temp)
+ {
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ }
+
+ /* Detect a conditional jump jumping over an unconditional jump. */
+
+ else if ((this_is_condjump || this_is_condjump_in_parallel)
+ && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == JUMP_INSN
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && simplejump_p (reallabelprev))
+ {
+ /* When we invert the unconditional jump, we will be
+ decrementing the usage count of its old label.
+ Make sure that we don't delete it now because that
+ might cause the following code to be deleted. */
+ rtx prev_uses = prev_nonnote_insn (reallabelprev);
+ rtx prev_label = JUMP_LABEL (insn);
+
+ if (prev_label)
+ ++LABEL_NUSES (prev_label);
+
+ if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
+ {
+ /* It is very likely that if there are USE insns before
+ this jump, they hold REG_DEAD notes. These REG_DEAD
+ notes are no longer valid due to this optimization,
+ and will cause the life-analysis that following passes
+ (notably delayed-branch scheduling) to think that
+ these registers are dead when they are not.
+
+ To prevent this trouble, we just remove the USE insns
+ from the insn chain. */
+
+ while (prev_uses && GET_CODE (prev_uses) == INSN
+ && GET_CODE (PATTERN (prev_uses)) == USE)
+ {
+ rtx useless = prev_uses;
+ prev_uses = prev_nonnote_insn (prev_uses);
+ delete_insn (useless);
+ }
+
+ delete_insn (reallabelprev);
+ changed = 1;
+ }
+
+ /* We can now safely delete the label if it is unreferenced
+ since the delete_insn above has deleted the BARRIER. */
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
+
+ next = NEXT_INSN (insn);
+ }
+
/* If we have an unconditional jump preceded by a USE, try to put
the USE before the target and jump there. This simplifies many
of the optimizations below since we don't have to worry about
being branch to already has the identical USE or if code
never falls through to that label. */
- if (this_is_simplejump
- && (temp = prev_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
- && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
- && (GET_CODE (temp1) == BARRIER
- || (GET_CODE (temp1) == INSN
- && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
- /* Don't do this optimization if we have a loop containing only
- the USE instruction, and the loop start label has a usage
- count of 1. This is because we will redo this optimization
- everytime through the outer loop, and jump opt will never
- exit. */
- && ! ((temp2 = prev_nonnote_insn (temp)) != 0
- && temp2 == JUMP_LABEL (insn)
- && LABEL_NUSES (temp2) == 1))
+ else if (this_is_simplejump
+ && (temp = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == USE
+ && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
+ && (GET_CODE (temp1) == BARRIER
+ || (GET_CODE (temp1) == INSN
+ && rtx_equal_p (PATTERN (temp), PATTERN (temp1))))
+ /* Don't do this optimization if we have a loop containing
+ only the USE instruction, and the loop start label has
+ a usage count of 1. This is because we will redo this
+ optimization every time through the outer loop, and jump
+ opt will never exit. */
+ && ! ((temp2 = prev_nonnote_insn (temp)) != 0
+ && temp2 == JUMP_LABEL (insn)
+ && LABEL_NUSES (temp2) == 1))
{
if (GET_CODE (temp1) == BARRIER)
{
redirect_jump (insn, get_label_before (temp1));
reallabelprev = prev_real_insn (temp1);
changed = 1;
+ next = NEXT_INSN (insn);
}
/* Simplify if (...) x = a; else x = b; by converting it
redirect_jump (p, target);
changed = 1;
+ next = NEXT_INSN (insn);
continue;
}
}
continue;
}
-#ifndef HAVE_cc0
+#if !defined(HAVE_cc0) && !defined(HAVE_conditional_arithmetic)
+
/* If we have if (...) x = exp; and branches are expensive,
EXP is a single insn, does not have any side effects, cannot
trap, and is not too costly, convert this to
the potential for conflicts. We also can't do this when we have
notes on the insn for the same reason as above.
+ If we have conditional arithmetic, this will make this
+ harder to optimize later and isn't needed, so don't do it
+ in that case either.
+
We set:
TEMP to the "x = exp;" insn.
}
#endif /* HAVE_cc0 */
+#ifdef HAVE_conditional_arithmetic
+ /* ??? This is disabled in genconfig, as this simple-minded
+ transformation can incredibly lengthen register lifetimes.
+
+ Consider this example from cexp.c's yyparse:
+
+ 234 (set (pc)
+ (if_then_else (ne (reg:DI 149) (const_int 0 [0x0]))
+ (label_ref 248) (pc)))
+ 237 (set (reg/i:DI 0 $0) (const_int 1 [0x1]))
+ 239 (set (pc) (label_ref 2382))
+ 248 (code_label ("yybackup"))
+
+ This will be transformed to:
+
+ 237 (set (reg/i:DI 0 $0)
+ (if_then_else:DI (eq (reg:DI 149) (const_int 0 [0x0]))
+ (const_int 1 [0x1]) (reg/i:DI 0 $0)))
+ 239 (set (pc)
+ (if_then_else (eq (reg:DI 149) (const_int 0 [0x0]))
+ (label_ref 2382) (pc)))
+
+ which, from this narrow viewpoint looks fine. Except that
+ between this and 3 other occurrences of the same pattern, $0
+ is now live for basically the entire function, and we'll
+ get an abort in caller_save.
+
+ Any replacement for this code should recall that a set of
+ a register that is not live need not, and indeed should not,
+ be conditionalized. Either that, or delay the transformation
+ until after register allocation. */
+
+ /* See if this is a conditional jump around a small number of
+ instructions that we can conditionalize. Don't do this before
+ the initial CSE pass or after reload.
+
+ We reject any insns that have side effects or may trap.
+ Strictly speaking, this is not needed since the machine may
+ support conditionalizing these too, but we won't deal with that
+ now. Specifically, this means that we can't conditionalize a
+ CALL_INSN, which some machines, such as the ARC, can do, but
+ this is a very minor optimization. */
+ if (this_is_condjump && ! this_is_simplejump
+ && cse_not_expected && ! reload_completed
+ && BRANCH_COST > 2
+ && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (insn)), 0),
+ insn))
+ {
+ rtx ourcond = XEXP (SET_SRC (PATTERN (insn)), 0);
+ int num_insns = 0;
+ char *storage = (char *) oballoc (0);
+ int last_insn = 0, failed = 0;
+ rtx changed_jump = 0;
+
+ ourcond = gen_rtx (reverse_condition (GET_CODE (ourcond)),
+ VOIDmode, XEXP (ourcond, 0),
+ XEXP (ourcond, 1));
+
+ /* Scan forward BRANCH_COST real insns looking for the JUMP_LABEL
+ of this insn. We see if we think we can conditionalize the
+ insns we pass. For now, we only deal with insns that have
+ one SET. We stop after an insn that modifies anything in
+ OURCOND, if we have too many insns, or if we have an insn
+ with a side effect or that may trap. Note that we will
+ be modifying any unconditional jumps we encounter to be
+ conditional; this will have the effect of also doing this
+ optimization on the "else" the next time around. */
+ for (temp1 = NEXT_INSN (insn);
+ num_insns <= BRANCH_COST && ! failed && temp1 != 0
+ && GET_CODE (temp1) != CODE_LABEL;
+ temp1 = NEXT_INSN (temp1))
+ {
+ /* Ignore everything but an active insn. */
+ if (GET_RTX_CLASS (GET_CODE (temp1)) != 'i'
+ || GET_CODE (PATTERN (temp1)) == USE
+ || GET_CODE (PATTERN (temp1)) == CLOBBER)
+ continue;
+
+ /* If this was an unconditional jump, record it since we'll
+ need to remove the BARRIER if we succeed. We can only
+ have one such jump since there must be a label after
+ the BARRIER and it's either ours, in which case it's the
+ only one or some other, in which case we'd fail.
+ Likewise if it's a CALL_INSN followed by a BARRIER. */
+
+ if (simplejump_p (temp1)
+ || (GET_CODE (temp1) == CALL_INSN
+ && NEXT_INSN (temp1) != 0
+ && GET_CODE (NEXT_INSN (temp1)) == BARRIER))
+ {
+ if (changed_jump == 0)
+ changed_jump = temp1;
+ else
+ changed_jump
+ = gen_rtx_INSN_LIST (VOIDmode, temp1, changed_jump);
+ }
+
+ /* See if we are allowed another insn and if this insn
+ is one we think we may be able to handle. */
+ if (++num_insns > BRANCH_COST
+ || last_insn
+ || (((temp2 = single_set (temp1)) == 0
+ || side_effects_p (SET_SRC (temp2))
+ || may_trap_p (SET_SRC (temp2)))
+ && GET_CODE (temp1) != CALL_INSN))
+ failed = 1;
+ else if (temp2 != 0)
+ validate_change (temp1, &SET_SRC (temp2),
+ gen_rtx_IF_THEN_ELSE
+ (GET_MODE (SET_DEST (temp2)),
+ copy_rtx (ourcond),
+ SET_SRC (temp2), SET_DEST (temp2)),
+ 1);
+ else
+ {
+ /* This is a CALL_INSN that doesn't have a SET. */
+ rtx *call_loc = &PATTERN (temp1);
+
+ if (GET_CODE (*call_loc) == PARALLEL)
+ call_loc = &XVECEXP (*call_loc, 0, 0);
+
+ validate_change (temp1, call_loc,
+ gen_rtx_IF_THEN_ELSE
+ (VOIDmode, copy_rtx (ourcond),
+ *call_loc, const0_rtx),
+ 1);
+ }
+
+
+ if (modified_in_p (ourcond, temp1))
+ last_insn = 1;
+ }
+
+ /* If we've reached our jump label, haven't failed, and all
+ the changes above are valid, we can delete this jump
+ insn. Also remove a BARRIER after any jump that used
+ to be unconditional and remove any REG_EQUAL or REG_EQUIV
+ that might have previously been present on insns we
+ made conditional. */
+ if (temp1 == JUMP_LABEL (insn) && ! failed
+ && apply_change_group ())
+ {
+ for (temp1 = NEXT_INSN (insn); temp1 != JUMP_LABEL (insn);
+ temp1 = NEXT_INSN (temp1))
+ if (GET_RTX_CLASS (GET_CODE (temp1)) == 'i')
+ for (temp2 = REG_NOTES (temp1); temp2 != 0;
+ temp2 = XEXP (temp2, 1))
+ if (REG_NOTE_KIND (temp2) == REG_EQUAL
+ || REG_NOTE_KIND (temp2) == REG_EQUIV)
+ remove_note (temp1, temp2);
+
+ if (changed_jump != 0)
+ {
+ while (GET_CODE (changed_jump) == INSN_LIST)
+ {
+ delete_barrier (NEXT_INSN (XEXP (changed_jump, 0)));
+ changed_jump = XEXP (changed_jump, 1);
+ }
+
+ delete_barrier (NEXT_INSN (changed_jump));
+ }
+
+ delete_insn (insn);
+ changed = 1;
+ continue;
+ }
+ else
+ {
+ cancel_changes (0);
+ obfree (storage);
+ }
+ }
+#endif
+ /* If branches are expensive, convert
+ if (foo) bar++; to bar += (foo != 0);
+ and similarly for "bar--;"
+
+ INSN is the conditional branch around the arithmetic. We set:
+
+ TEMP is the arithmetic insn.
+ TEMP1 is the SET doing the arithmetic.
+ TEMP2 is the operand being incremented or decremented.
+ TEMP3 to the condition being tested.
+ TEMP4 to the earliest insn used to find the condition. */
+
+ if ((BRANCH_COST >= 2
+#ifdef HAVE_incscc
+ || HAVE_incscc
+#endif
+#ifdef HAVE_decscc
+ || HAVE_decscc
+#endif
+ )
+ && ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && (temp = next_nonnote_insn (insn)) != 0
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1),
+ GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
+ && GET_CODE (SET_SRC (temp1)) == PLUS
+ && (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
+ && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
+ && ! side_effects_p (temp2)
+ && ! may_trap_p (temp2)
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp3 = next_active_insn (temp)) != 0
+ && simplejump_p (temp3)
+ && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
+ && (temp3 = get_condition (insn, &temp4)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp3, 0)) != BLKmode
+ && can_reverse_comparison_p (temp3, insn))
+ {
+ rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
+ enum rtx_code code = reverse_condition (GET_CODE (temp3));
+
+ start_sequence ();
+
+ /* It must be the case that TEMP2 is not modified in the range
+ [TEMP4, INSN). The one exception we make is if the insn
+ before INSN sets TEMP2 to something which is also unchanged
+ in that range. In that case, we can move the initialization
+ into our sequence. */
+
+ if ((temp5 = prev_active_insn (insn)) != 0
+ && no_labels_between_p (temp5, insn)
+ && GET_CODE (temp5) == INSN
+ && (temp6 = single_set (temp5)) != 0
+ && rtx_equal_p (temp2, SET_DEST (temp6))
+ && (CONSTANT_P (SET_SRC (temp6))
+ || GET_CODE (SET_SRC (temp6)) == REG
+ || GET_CODE (SET_SRC (temp6)) == SUBREG))
+ {
+ emit_insn (PATTERN (temp5));
+ init_insn = temp5;
+ init = SET_SRC (temp6);
+ }
+
+ if (CONSTANT_P (init)
+ || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
+ XEXP (temp3, 0), XEXP (temp3, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GTU || code == GEU), 1);
+
+ /* If we can do the store-flag, do the addition or
+ subtraction. */
+
+ if (target)
+ target = expand_binop (GET_MODE (temp2),
+ (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ ? add_optab : sub_optab),
+ temp2, target, temp2, 0, OPTAB_WIDEN);
+
+ if (target != 0)
+ {
+ /* Put the result back in temp2 in case it isn't already.
+ Then replace the jump, possibly a CC0-setting insn in
+ front of the jump, and TEMP, with the sequence we have
+ made. */
+
+ if (target != temp2)
+ emit_move_insn (temp2, target);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, temp4);
+ delete_insn (temp);
+
+ if (init_insn)
+ delete_insn (init_insn);
+
+ next = NEXT_INSN (insn);
+#ifdef HAVE_cc0
+ delete_insn (prev_nonnote_insn (insn));
+#endif
+ delete_insn (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+
/* Try to use a conditional move (if the target has them), or a
- store-flag insn. The general case is:
+ store-flag insn. If the target has conditional arithmetic as
+ well as conditional move, the above code will have done something.
+ Note that we prefer the above code since it is more general: the
+ code below can make changes that require work to undo.
+
+ The general case here is:
1) x = a; if (...) x = b; and
2) if (...) x = b;
INSN here is the jump around the store. We set:
- TEMP to the "x = b;" insn.
+ TEMP to the "x op= b;" insn.
TEMP1 to X.
TEMP2 to B.
TEMP3 to A (X in the second case).
TEMP4 to the condition being tested.
- TEMP5 to the earliest insn used to find the condition. */
+ TEMP5 to the earliest insn used to find the condition.
+ TEMP6 to the SET of TEMP. */
if (/* We can't do this after reload has completed. */
! reload_completed
+#ifdef HAVE_conditional_arithmetic
+ /* Defer this until after CSE so the above code gets the
+ first crack at it. */
+ && cse_not_expected
+#endif
&& this_is_condjump && ! this_is_simplejump
/* Set TEMP to the "x = b;" insn. */
&& (temp = next_nonnote_insn (insn)) != 0
&& GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
- && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
+ && (temp6 = single_set (temp)) != NULL_RTX
+ && GET_CODE (temp1 = SET_DEST (temp6)) == REG
&& (! SMALL_REGISTER_CLASSES
|| REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && ! side_effects_p (temp2 = SET_SRC (PATTERN (temp)))
+ && ! side_effects_p (temp2 = SET_SRC (temp6))
&& ! may_trap_p (temp2)
/* Allow either form, but prefer the former if both apply.
There is no point in using the old value of TEMP1 if
enum rtx_code code = GET_CODE (temp4);
rtx var = temp1;
rtx cond0, cond1, aval, bval;
- rtx target;
+ rtx target, new_insn;
/* Copy the compared variables into cond0 and cond1, so that
any side effects performed in or after the old comparison,
insn? After all, we're going to delete it. We'd have
to modify emit_conditional_move to take a comparison rtx
instead or write a new function. */
- cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
+
/* We want the target to be able to simplify comparisons with
zero (and maybe other constants as well), so don't create
pseudos for them. There's no need to either. */
+ if (GET_CODE (XEXP (temp4, 0)) == CONST_INT
+ || GET_CODE (XEXP (temp4, 0)) == CONST_DOUBLE)
+ cond0 = XEXP (temp4, 0);
+ else
+ cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
+
if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
|| GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
cond1 = XEXP (temp4, 1);
else
cond1 = gen_reg_rtx (GET_MODE (XEXP (temp4, 1)));
+ /* Careful about copying these values -- an IOR or what may
+ need to do other things, like clobber flags. */
+ /* ??? Assume for the moment that AVAL is ok. */
aval = temp3;
- bval = temp2;
start_sequence ();
+
+ /* We're dealing with a single_set insn with no side effects
+ on SET_SRC. We do need to be reasonably certain that if
+ we need to force BVAL into a register that we won't
+ clobber the flags -- general_operand should suffice. */
+ if (general_operand (temp2, GET_MODE (var)))
+ bval = temp2;
+ else
+ {
+ bval = gen_reg_rtx (GET_MODE (var));
+ new_insn = copy_rtx (temp);
+ temp6 = single_set (new_insn);
+ SET_DEST (temp6) = bval;
+ emit_insn (PATTERN (new_insn));
+ }
+
target = emit_conditional_move (var, code,
cond0, cond1, VOIDmode,
aval, bval, GET_MODE (var),
5) if (...) x = b; if jumps are even more expensive. */
if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
+ /* We will be passing this as operand into expand_and. No
+ good if it's not valid as an operand. */
+ && general_operand (temp2, GET_MODE (temp2))
&& ((GET_CODE (temp3) == CONST_INT)
/* Make the latter case look like
x = x; if (...) x = 0; */
}
}
- /* If branches are expensive, convert
- if (foo) bar++; to bar += (foo != 0);
- and similarly for "bar--;"
-
- INSN is the conditional branch around the arithmetic. We set:
-
- TEMP is the arithmetic insn.
- TEMP1 is the SET doing the arithmetic.
- TEMP2 is the operand being incremented or decremented.
- TEMP3 to the condition being tested.
- TEMP4 to the earliest insn used to find the condition. */
-
- if ((BRANCH_COST >= 2
-#ifdef HAVE_incscc
- || HAVE_incscc
-#endif
-#ifdef HAVE_decscc
- || HAVE_decscc
-#endif
- )
- && ! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && (temp = next_nonnote_insn (insn)) != 0
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1),
- GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
- && GET_CODE (SET_SRC (temp1)) == PLUS
- && (XEXP (SET_SRC (temp1), 1) == const1_rtx
- || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
- && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
- && ! side_effects_p (temp2)
- && ! may_trap_p (temp2)
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp3 = next_active_insn (temp)) != 0
- && simplejump_p (temp3)
- && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
- && (temp3 = get_condition (insn, &temp4)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp3, 0)) != BLKmode
- && can_reverse_comparison_p (temp3, insn))
- {
- rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
- enum rtx_code code = reverse_condition (GET_CODE (temp3));
-
- start_sequence ();
-
- /* It must be the case that TEMP2 is not modified in the range
- [TEMP4, INSN). The one exception we make is if the insn
- before INSN sets TEMP2 to something which is also unchanged
- in that range. In that case, we can move the initialization
- into our sequence. */
-
- if ((temp5 = prev_active_insn (insn)) != 0
- && no_labels_between_p (temp5, insn)
- && GET_CODE (temp5) == INSN
- && (temp6 = single_set (temp5)) != 0
- && rtx_equal_p (temp2, SET_DEST (temp6))
- && (CONSTANT_P (SET_SRC (temp6))
- || GET_CODE (SET_SRC (temp6)) == REG
- || GET_CODE (SET_SRC (temp6)) == SUBREG))
- {
- emit_insn (PATTERN (temp5));
- init_insn = temp5;
- init = SET_SRC (temp6);
- }
-
- if (CONSTANT_P (init)
- || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
- target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
- XEXP (temp3, 0), XEXP (temp3, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GTU || code == GEU), 1);
-
- /* If we can do the store-flag, do the addition or
- subtraction. */
-
- if (target)
- target = expand_binop (GET_MODE (temp2),
- (XEXP (SET_SRC (temp1), 1) == const1_rtx
- ? add_optab : sub_optab),
- temp2, target, temp2, 0, OPTAB_WIDEN);
-
- if (target != 0)
- {
- /* Put the result back in temp2 in case it isn't already.
- Then replace the jump, possible a CC0-setting insn in
- front of the jump, and TEMP, with the sequence we have
- made. */
-
- if (target != temp2)
- emit_move_insn (temp2, target);
-
- seq = get_insns ();
- end_sequence ();
-
- emit_insns_before (seq, temp4);
- delete_insn (temp);
-
- if (init_insn)
- delete_insn (init_insn);
-
- next = NEXT_INSN (insn);
-#ifdef HAVE_cc0
- delete_insn (prev_nonnote_insn (insn));
-#endif
- delete_insn (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
/* Simplify if (...) x = 1; else {...} if (x) ...
We recognize this case scanning backwards as well.
}
}
#endif
- /* Detect a conditional jump going to the same place
- as an immediately following unconditional jump. */
- else if (this_is_condjump
- && (temp = next_active_insn (insn)) != 0
- && simplejump_p (temp)
- && (next_active_insn (JUMP_LABEL (insn))
- == next_active_insn (JUMP_LABEL (temp))))
- {
- rtx tem = temp;
-
- /* ??? Optional. Disables some optimizations, but makes
- gcov output more accurate with -O. */
- if (flag_test_coverage && !reload_completed)
- for (tem = insn; tem != temp; tem = NEXT_INSN (tem))
- if (GET_CODE (tem) == NOTE && NOTE_LINE_NUMBER (tem) > 0)
- break;
-
- if (tem == temp)
- {
- delete_jump (insn);
- changed = 1;
- continue;
- }
- }
#ifdef HAVE_trap
/* Detect a conditional jump jumping over an unconditional trap. */
else if (HAVE_trap
&& ! this_is_simplejump
&& swap_condition (GET_CODE (temp2)) == GET_CODE (tc)
&& rtx_equal_p (XEXP (tc, 0), XEXP (temp2, 0))
- && rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
- && redirect_jump (insn, get_label_after (temp)))
- {
- changed = 1;
- continue;
- }
- }
-#endif
-
- /* Detect a conditional jump jumping over an unconditional jump. */
-
- else if ((this_is_condjump || this_is_condjump_in_parallel)
- && ! this_is_simplejump
- && reallabelprev != 0
- && GET_CODE (reallabelprev) == JUMP_INSN
- && prev_active_insn (reallabelprev) == insn
- && no_labels_between_p (insn, reallabelprev)
- && simplejump_p (reallabelprev))
- {
- /* When we invert the unconditional jump, we will be
- decrementing the usage count of its old label.
- Make sure that we don't delete it now because that
- might cause the following code to be deleted. */
- rtx prev_uses = prev_nonnote_insn (reallabelprev);
- rtx prev_label = JUMP_LABEL (insn);
-
- if (prev_label)
- ++LABEL_NUSES (prev_label);
-
- if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
- {
- /* It is very likely that if there are USE insns before
- this jump, they hold REG_DEAD notes. These REG_DEAD
- notes are no longer valid due to this optimization,
- and will cause the life-analysis that following passes
- (notably delayed-branch scheduling) to think that
- these registers are dead when they are not.
-
- To prevent this trouble, we just remove the USE insns
- from the insn chain. */
-
- while (prev_uses && GET_CODE (prev_uses) == INSN
- && GET_CODE (PATTERN (prev_uses)) == USE)
- {
- rtx useless = prev_uses;
- prev_uses = prev_nonnote_insn (prev_uses);
- delete_insn (useless);
- }
-
- delete_insn (reallabelprev);
- next = insn;
- changed = 1;
- }
-
- /* We can now safely delete the label if it is unreferenced
- since the delete_insn above has deleted the BARRIER. */
- if (prev_label && --LABEL_NUSES (prev_label) == 0)
- delete_insn (prev_label);
- continue;
- }
- else
- {
- /* Detect a jump to a jump. */
-
- nlabel = follow_jumps (JUMP_LABEL (insn));
- if (nlabel != JUMP_LABEL (insn)
- && redirect_jump (insn, nlabel))
+ && rtx_equal_p (XEXP (tc, 1), XEXP (temp2, 1))
+ && redirect_jump (insn, get_label_after (temp)))
{
changed = 1;
- next = insn;
+ continue;
}
-
- /* Look for if (foo) bar; else break; */
- /* The insns look like this:
- insn = condjump label1;
- ...range1 (some insns)...
- jump label2;
- label1:
- ...range2 (some insns)...
- jump somewhere unconditionally
- label2: */
- {
- rtx label1 = next_label (insn);
- rtx range1end = label1 ? prev_active_insn (label1) : 0;
- /* Don't do this optimization on the first round, so that
- jump-around-a-jump gets simplified before we ask here
- whether a jump is unconditional.
-
- Also don't do it when we are called after reload since
- it will confuse reorg. */
- if (! first
- && (reload_completed ? ! flag_delayed_branch : 1)
- /* Make sure INSN is something we can invert. */
- && condjump_p (insn)
- && label1 != 0
- && JUMP_LABEL (insn) == label1
- && LABEL_NUSES (label1) == 1
- && GET_CODE (range1end) == JUMP_INSN
- && simplejump_p (range1end))
- {
- rtx label2 = next_label (label1);
- rtx range2end = label2 ? prev_active_insn (label2) : 0;
- if (range1end != range2end
- && JUMP_LABEL (range1end) == label2
- && GET_CODE (range2end) == JUMP_INSN
- && GET_CODE (NEXT_INSN (range2end)) == BARRIER
- /* Invert the jump condition, so we
- still execute the same insns in each case. */
- && invert_jump (insn, label1))
- {
- rtx range1beg = next_active_insn (insn);
- rtx range2beg = next_active_insn (label1);
- rtx range1after, range2after;
- rtx range1before, range2before;
- rtx rangenext;
-
- /* Include in each range any notes before it, to be
- sure that we get the line number note if any, even
- if there are other notes here. */
- while (PREV_INSN (range1beg)
- && GET_CODE (PREV_INSN (range1beg)) == NOTE)
- range1beg = PREV_INSN (range1beg);
-
- while (PREV_INSN (range2beg)
- && GET_CODE (PREV_INSN (range2beg)) == NOTE)
- range2beg = PREV_INSN (range2beg);
-
- /* Don't move NOTEs for blocks or loops; shift them
- outside the ranges, where they'll stay put. */
- range1beg = squeeze_notes (range1beg, range1end);
- range2beg = squeeze_notes (range2beg, range2end);
-
- /* Get current surrounds of the 2 ranges. */
- range1before = PREV_INSN (range1beg);
- range2before = PREV_INSN (range2beg);
- range1after = NEXT_INSN (range1end);
- range2after = NEXT_INSN (range2end);
-
- /* Splice range2 where range1 was. */
- NEXT_INSN (range1before) = range2beg;
- PREV_INSN (range2beg) = range1before;
- NEXT_INSN (range2end) = range1after;
- PREV_INSN (range1after) = range2end;
- /* Splice range1 where range2 was. */
- NEXT_INSN (range2before) = range1beg;
- PREV_INSN (range1beg) = range2before;
- NEXT_INSN (range1end) = range2after;
- PREV_INSN (range2after) = range1end;
-
- /* Check for a loop end note between the end of
- range2, and the next code label. If there is one,
- then what we have really seen is
- if (foo) break; end_of_loop;
- and moved the break sequence outside the loop.
- We must move the LOOP_END note to where the
- loop really ends now, or we will confuse loop
- optimization. Stop if we find a LOOP_BEG note
- first, since we don't want to move the LOOP_END
- note in that case. */
- for (;range2after != label2; range2after = rangenext)
- {
- rangenext = NEXT_INSN (range2after);
- if (GET_CODE (range2after) == NOTE)
- {
- if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_END)
- {
- NEXT_INSN (PREV_INSN (range2after))
- = rangenext;
- PREV_INSN (rangenext)
- = PREV_INSN (range2after);
- PREV_INSN (range2after)
- = PREV_INSN (range1beg);
- NEXT_INSN (range2after) = range1beg;
- NEXT_INSN (PREV_INSN (range1beg))
- = range2after;
- PREV_INSN (range1beg) = range2after;
- }
- else if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_BEG)
- break;
- }
- }
- changed = 1;
- continue;
- }
- }
- }
-
+ }
+#endif
+ else
+ {
/* Now that the jump has been tensioned,
try cross jumping: check for identical code
before the jump and before its target label. */
}
}
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. We do this both here and at the start of this pass since
- the RETURN might have been deleted by some of our optimizations. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
/* CAN_REACH_END is persistent for each function. Once set it should
not be cleared. This is especially true for the case where we
delete the NOTE_FUNCTION_END note. CAN_REACH_END is cleared by
the front-end before compiling each function. */
- if (calculate_can_reach_end (last_insn, 0, 1))
+ if (! minimal && calculate_can_reach_end (last_insn, optimize != 0))
can_reach_end = 1;
- /* Show JUMP_CHAIN no longer valid. */
+end:
+ /* Clean up. */
+ free (jump_chain);
jump_chain = 0;
}
\f
/* Delete insns following barriers, up to next label.
Also delete no-op jumps created by gcse. */
+
static void
delete_barrier_successors (f)
rtx f;
if (GET_CODE (insn) == BARRIER)
{
insn = NEXT_INSN (insn);
+
+ never_reached_warning (insn);
+
while (insn != 0 && GET_CODE (insn) != CODE_LABEL)
{
if (GET_CODE (insn) == NOTE
}
/* INSN is now the code_label. */
}
+
/* Also remove (set (pc) (pc)) insns which can be created by
gcse. We eliminate such insns now to avoid having them
cause problems later. */
else if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
&& SET_SRC (PATTERN (insn)) == pc_rtx
&& SET_DEST (PATTERN (insn)) == pc_rtx)
insn = delete_insn (insn);
for (insn = f; insn; insn = NEXT_INSN (insn))
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
{
- mark_jump_label (PATTERN (insn), insn, cross_jump);
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER)
+ {
+ mark_all_labels (XEXP (PATTERN (insn), 0), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 1), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 2), cross_jump);
+ continue;
+ }
+
+ mark_jump_label (PATTERN (insn), insn, cross_jump, 0);
if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
{
if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
for (insn = f; insn; )
{
- if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
+ if (GET_CODE (insn) == CODE_LABEL
+ && LABEL_NUSES (insn) == 0
+ && LABEL_ALTERNATE_NAME (insn) == NULL)
insn = delete_insn (insn);
else
{
{
register rtx body = PATTERN (insn);
-/* Combine stack_adjusts with following push_insns. */
-#ifdef PUSH_ROUNDING
- if (GET_CODE (body) == SET
- && SET_DEST (body) == stack_pointer_rtx
- && GET_CODE (SET_SRC (body)) == PLUS
- && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
- && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
- && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
- {
- rtx p;
- rtx stack_adjust_insn = insn;
- int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
- int total_pushed = 0;
- int pushes = 0;
-
- /* Find all successive push insns. */
- p = insn;
- /* Don't convert more than three pushes;
- that starts adding too many displaced addresses
- and the whole thing starts becoming a losing
- proposition. */
- while (pushes < 3)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (p == 0 || GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- pushes++;
- if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
- > stack_adjust_amount)
- break;
- total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- }
-
- /* Discard the amount pushed from the stack adjust;
- maybe eliminate it entirely. */
- if (total_pushed >= stack_adjust_amount)
- {
- delete_computation (stack_adjust_insn);
- total_pushed = stack_adjust_amount;
- }
- else
- XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
- = GEN_INT (stack_adjust_amount - total_pushed);
-
- /* Change the appropriate push insns to ordinary stores. */
- p = insn;
- while (total_pushed > 0)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- /* If this push doesn't fully fit in the space
- of the stack adjust that we deleted,
- make another stack adjust here for what we
- didn't use up. There should be peepholes
- to recognize the resulting sequence of insns. */
- if (total_pushed < 0)
- {
- emit_insn_before (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (- total_pushed)),
- p);
- break;
- }
- XEXP (dest, 0)
- = plus_constant (stack_pointer_rtx, total_pushed);
- }
- }
-#endif
-
/* Detect and delete no-op move instructions
resulting from not allocating a parameter in a register. */
if we find it. */
static int
-calculate_can_reach_end (last, check_deleted, delete_final_note)
+calculate_can_reach_end (last, delete_final_note)
rtx last;
- int check_deleted;
int delete_final_note;
{
rtx insn = last;
/* See if we backed up to the appropriate type of note. */
if (insn != NULL_RTX
&& GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
- && (check_deleted == 0
- || ! INSN_DELETED_P (insn)))
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END)
{
if (delete_final_note)
delete_insn (insn);
rtx loop_start;
{
rtx insn, set, reg, p, link;
- rtx copy = 0;
+ rtx copy = 0, first_copy = 0;
int num_insns = 0;
rtx exitcode = NEXT_INSN (JUMP_LABEL (next_nonnote_insn (loop_start)));
rtx lastexit;
remove_note (insn, p);
if (++num_insns > 20
|| find_reg_note (insn, REG_RETVAL, NULL_RTX)
- || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
- || asm_noperands (PATTERN (insn)) > 0)
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
return 0;
break;
default:
/* We can do the replacement. Allocate reg_map if this is the
first replacement we found. */
if (reg_map == 0)
- {
- reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
- bzero ((char *) reg_map, max_reg * sizeof (rtx));
- }
+ reg_map = (rtx *) xcalloc (max_reg, sizeof (rtx));
REG_LOOP_TEST_P (reg) = 1;
/* Now copy each insn. */
for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
- switch (GET_CODE (insn))
- {
- case BARRIER:
- copy = emit_barrier_before (loop_start);
- break;
- case NOTE:
- /* Only copy line-number notes. */
- if (NOTE_LINE_NUMBER (insn) >= 0)
- {
- copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
- NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
- }
- break;
-
- case INSN:
- copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
- if (reg_map)
- replace_regs (PATTERN (copy), reg_map, max_reg, 1);
-
- mark_jump_label (PATTERN (copy), copy, 0);
-
- /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
- make them. */
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) != REG_LABEL)
- REG_NOTES (copy)
- = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
- XEXP (link, 0),
- REG_NOTES (copy)));
- if (reg_map && REG_NOTES (copy))
- replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
- break;
-
- case JUMP_INSN:
- copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
- if (reg_map)
- replace_regs (PATTERN (copy), reg_map, max_reg, 1);
- mark_jump_label (PATTERN (copy), copy, 0);
- if (REG_NOTES (insn))
- {
- REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
- if (reg_map)
- replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
- }
-
- /* If this is a simple jump, add it to the jump chain. */
-
- if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
- && simplejump_p (copy))
- {
- jump_chain[INSN_UID (copy)]
- = jump_chain[INSN_UID (JUMP_LABEL (copy))];
- jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
- }
- break;
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ copy = emit_barrier_before (loop_start);
+ break;
+ case NOTE:
+ /* Only copy line-number notes. */
+ if (NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ }
+ break;
+
+ case INSN:
+ copy = emit_insn_before (copy_insn (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
+
+ /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
+ make them. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) != REG_LABEL)
+ REG_NOTES (copy)
+ = copy_insn_1 (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
+ XEXP (link, 0),
+ REG_NOTES (copy)));
+ if (reg_map && REG_NOTES (copy))
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ break;
+
+ case JUMP_INSN:
+ copy = emit_jump_insn_before (copy_insn (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
+ if (REG_NOTES (insn))
+ {
+ REG_NOTES (copy) = copy_insn_1 (REG_NOTES (insn));
+ if (reg_map)
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ }
+
+ /* If this is a simple jump, add it to the jump chain. */
+
+ if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
+ && simplejump_p (copy))
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ break;
+
+ default:
+ abort ();
+ }
- default:
- abort ();
- }
+ /* Record the first insn we copied. We need it so that we can
+ scan the copied insns for new pseudo registers. */
+ if (! first_copy)
+ first_copy = copy;
+ }
/* Now clean up by emitting a jump to the end label and deleting the jump
at the start of the loop. */
{
copy = emit_jump_insn_before (gen_jump (get_label_after (insn)),
loop_start);
- mark_jump_label (PATTERN (copy), copy, 0);
+
+ /* Record the first insn we copied. We need it so that we can
+ scan the copied insns for new pseudo registers. This may not
+ be strictly necessary since we should have copied at least one
+ insn above. But I am going to be safe. */
+ if (! first_copy)
+ first_copy = copy;
+
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
if (INSN_UID (copy) < max_jump_chain
&& INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
{
emit_barrier_before (loop_start);
}
+ /* Now scan from the first insn we copied to the last insn we copied
+ (copy) for new pseudo registers. Do this after the code to jump to
+ the end label since that might create a new pseudo too. */
+ reg_scan_update (first_copy, copy, max_reg);
+
/* Mark the exit code as the virtual top of the converted loop. */
emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
delete_insn (next_nonnote_insn (loop_start));
+
+ /* Clean up. */
+ if (reg_map)
+ free (reg_map);
return 1;
}
rtx prev = prev_nonnote_insn (insn);
rtx set;
- /* If the comparison itself was a loop invariant, it could have been
- hoisted out of the loop. If we proceed to unroll such a loop, then
- we may not be able to find the comparison when copying the loop.
-
- Returning zero in that case is the safe thing to do. */
- if (prev == 0)
- return 0;
-
- set = single_set (prev);
- if (set == 0 || SET_DEST (set) != arg0)
- return 0;
-
- arg0 = SET_SRC (set);
+ /* First see if the condition code mode alone is enough to say we can
+ reverse the condition. If not, then search backwards for a set of
+ ARG0. We do not need to check for an insn clobbering it since valid
+ code will contain a set with no intervening clobber. But
+ stop when we reach a label. */
+#ifdef REVERSIBLE_CC_MODE
+ if (GET_MODE_CLASS (GET_MODE (arg0)) == MODE_CC
+ && REVERSIBLE_CC_MODE (GET_MODE (arg0)))
+ return 1;
+#endif
+
+ for (prev = prev_nonnote_insn (insn);
+ prev != 0 && GET_CODE (prev) != CODE_LABEL;
+ prev = prev_nonnote_insn (prev))
+ if ((set = single_set (prev)) != 0
+ && rtx_equal_p (SET_DEST (set), arg0))
+ {
+ arg0 = SET_SRC (set);
- if (GET_CODE (arg0) == COMPARE)
- arg0 = XEXP (arg0, 0);
+ if (GET_CODE (arg0) == COMPARE)
+ arg0 = XEXP (arg0, 0);
+ break;
+ }
}
/* We can reverse this if ARG0 is a CONST_INT or if its mode is
&& GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
}
-/* Given an rtx-code for a comparison, return the code
- for the negated comparison.
- WATCH OUT! reverse_condition is not safe to use on a jump
- that might be acting on the results of an IEEE floating point comparison,
- because of the special treatment of non-signaling nans in comparisons.
+/* Given an rtx-code for a comparison, return the code for the negated
+ comparison. If no such code exists, return UNKNOWN.
+
+ WATCH OUT! reverse_condition is not safe to use on a jump that might
+ be acting on the results of an IEEE floating point comparison, because
+ of the special treatment of non-signaling nans in comparisons.
Use can_reverse_comparison_p to be sure. */
enum rtx_code
{
case EQ:
return NE;
-
case NE:
return EQ;
-
case GT:
return LE;
-
case GE:
return LT;
-
case LT:
return GE;
-
case LE:
return GT;
-
case GTU:
return LEU;
-
case GEU:
return LTU;
-
case LTU:
return GEU;
+ case LEU:
+ return GTU;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return UNKNOWN;
+
+ default:
+ abort ();
+ }
+}
+
+/* Similar, but we're allowed to generate unordered comparisons, which
+ makes it safe for IEEE floating-point. Of course, we have to recognize
+ that the target will support them too... */
+
+enum rtx_code
+reverse_condition_maybe_unordered (code)
+ enum rtx_code code;
+{
+ /* Non-IEEE formats don't have unordered conditions. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
+ return reverse_condition (code);
+ switch (code)
+ {
+ case EQ:
+ return NE;
+ case NE:
+ return EQ;
+ case GT:
+ return UNLE;
+ case GE:
+ return UNLT;
+ case LT:
+ return UNGE;
+ case LE:
+ return UNGT;
+ case LTGT:
+ return UNEQ;
+ case GTU:
+ return LEU;
+ case GEU:
+ return LTU;
+ case LTU:
+ return GEU;
case LEU:
return GTU;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+ case UNLT:
+ return GE;
+ case UNLE:
+ return GT;
+ case UNGT:
+ return LE;
+ case UNGE:
+ return LT;
+ case UNEQ:
+ return LTGT;
default:
abort ();
- return UNKNOWN;
}
}
{
case EQ:
case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNEQ:
+ case LTGT:
return code;
case GT:
return LT;
-
case GE:
return LE;
-
case LT:
return GT;
-
case LE:
return GE;
-
case GTU:
return LTU;
-
case GEU:
return LEU;
-
case LTU:
return GTU;
-
case LEU:
return GEU;
+ case UNLT:
+ return UNGT;
+ case UNLE:
+ return UNGE;
+ case UNGT:
+ return UNLT;
+ case UNGE:
+ return UNLE;
default:
abort ();
- return UNKNOWN;
}
}
case GT:
return GTU;
-
case GE:
return GEU;
-
case LT:
return LTU;
-
case LE:
return LEU;
case GTU:
return GT;
-
case GEU:
return GE;
-
case LTU:
return LT;
-
case LEU:
return LE;
switch (code1)
{
case EQ:
- if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
+ if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU
+ || code2 == ORDERED)
return 1;
break;
case LT:
- if (code2 == LE || code2 == NE)
+ if (code2 == LE || code2 == NE || code2 == ORDERED)
return 1;
break;
case GT:
- if (code2 == GE || code2 == NE)
+ if (code2 == GE || code2 == NE || code2 == ORDERED)
+ return 1;
+ break;
+
+ case GE:
+ case LE:
+ if (code2 == ORDERED)
+ return 1;
+ break;
+
+ case LTGT:
+ if (code2 == NE || code2 == ORDERED)
return 1;
break;
if (code2 == GEU || code2 == NE)
return 1;
break;
+
+ case UNORDERED:
+ if (code2 == NE)
+ return 1;
+ break;
default:
break;
rtx insn;
{
register rtx x = PATTERN (insn);
- if (GET_CODE (x) != SET)
- return 0;
- if (GET_CODE (SET_DEST (x)) != PC)
- return 0;
- if (GET_CODE (SET_SRC (x)) == LABEL_REF)
- return 1;
- if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+
+ if (GET_CODE (x) != SET
+ || GET_CODE (SET_DEST (x)) != PC)
return 0;
- if (XEXP (SET_SRC (x), 2) == pc_rtx
- && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
- || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
- return 1;
- if (XEXP (SET_SRC (x), 1) == pc_rtx
- && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
- || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+
+ x = SET_SRC (x);
+ if (GET_CODE (x) == LABEL_REF)
return 1;
+ else return (GET_CODE (x) == IF_THEN_ELSE
+ && ((GET_CODE (XEXP (x, 2)) == PC
+ && (GET_CODE (XEXP (x, 1)) == LABEL_REF
+ || GET_CODE (XEXP (x, 1)) == RETURN))
+ || (GET_CODE (XEXP (x, 1)) == PC
+ && (GET_CODE (XEXP (x, 2)) == LABEL_REF
+ || GET_CODE (XEXP (x, 2)) == RETURN))));
+
return 0;
}
-/* Return nonzero if INSN is a (possibly) conditional jump
- and nothing more. */
+/* Return nonzero if INSN is a (possibly) conditional jump inside a
+ PARALLEL. */
int
condjump_in_parallel_p (insn)
void *data ATTRIBUTE_UNUSED;
{
rtx x = *loc;
- return GET_CODE (x) == RETURN;
+ return x && GET_CODE (x) == RETURN;
}
int
two labels distinct if they are separated by only USE or CLOBBER insns. */
static void
-mark_jump_label (x, insn, cross_jump)
+mark_jump_label (x, insn, cross_jump, in_mem)
register rtx x;
rtx insn;
int cross_jump;
+ int in_mem;
{
register RTX_CODE code = GET_CODE (x);
register int i;
- register char *fmt;
+ register const char *fmt;
switch (code)
{
case REG:
case SUBREG:
case CONST_INT:
- case SYMBOL_REF:
case CONST_DOUBLE:
case CLOBBER:
case CALL:
return;
case MEM:
+ in_mem = 1;
+ break;
+
+ case SYMBOL_REF:
+ if (!in_mem)
+ return;
+
/* If this is a constant-pool reference, see if it is a label. */
- if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
- mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ mark_jump_label (get_pool_constant (x), insn, cross_jump, in_mem);
break;
case LABEL_REF:
int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
for (i = 0; i < XVECLEN (x, eltnum); i++)
- mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX,
+ cross_jump, in_mem);
}
return;
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- mark_jump_label (XEXP (x, i), insn, cross_jump);
+ mark_jump_label (XEXP (x, i), insn, cross_jump, in_mem);
else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
- mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
+ mark_jump_label (XVECEXP (x, i, j), insn, cross_jump, in_mem);
}
}
}
delete_computation (insn);
}
+/* Verify INSN is a BARRIER and delete it. */
+
+void
+delete_barrier (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) != BARRIER)
+ abort ();
+
+ delete_insn (insn);
+}
+
/* Recursively delete prior insns that compute the value (used only by INSN
which the caller is deleting) stored in the register mentioned by NOTE
which is a REG_DEAD note associated with INSN. */
rtx reg = XEXP (note, 0);
for (our_prev = prev_nonnote_insn (insn);
- our_prev && GET_CODE (our_prev) == INSN;
+ our_prev && (GET_CODE (our_prev) == INSN
+ || GET_CODE (our_prev) == CALL_INSN);
our_prev = prev_nonnote_insn (our_prev))
{
rtx pat = PATTERN (our_prev);
+ /* If we reach a CALL which is not calling a const function
+ or the callee pops the arguments, then give up. */
+ if (GET_CODE (our_prev) == CALL_INSN
+ && (! CONST_CALL_P (our_prev)
+ || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
+ break;
+
/* If we reach a SEQUENCE, it is too complex to try to
do anything with it, so give up. */
if (GET_CODE (pat) == SEQUENCE)
if (reg_set_p (reg, pat))
{
- if (side_effects_p (pat))
+ if (side_effects_p (pat) && GET_CODE (our_prev) != CALL_INSN)
break;
if (GET_CODE (pat) == PARALLEL)
insns. Write REG_UNUSED notes for those parts that were not
needed. */
else if (dest_regno <= regno
- && dest_endregno >= endregno
- && ! find_regno_note (our_prev, REG_UNUSED, REGNO(reg)))
+ && dest_endregno >= endregno)
{
int i;
}
#endif
+ /* The REG_DEAD note may have been omitted for a register
+ which is both set and used by the insn. */
set = single_set (insn);
+ if (set && GET_CODE (SET_DEST (set)) == REG)
+ {
+ int dest_regno = REGNO (SET_DEST (set));
+ int dest_endregno
+ = dest_regno + (dest_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (dest_regno,
+ GET_MODE (SET_DEST (set))) : 1);
+ int i;
+
+ for (i = dest_regno; i < dest_endregno; i++)
+ {
+ if (! refers_to_regno_p (i, i + 1, SET_SRC (set), NULL_PTR)
+ || find_regno_note (insn, REG_DEAD, i))
+ continue;
+
+ note = gen_rtx_EXPR_LIST (REG_DEAD, (i < FIRST_PSEUDO_REGISTER
+ ? gen_rtx_REG (reg_raw_mode[i], i)
+ : SET_DEST (set)), NULL_RTX);
+ delete_prior_computation (note, insn);
+ }
+ }
for (note = REG_NOTES (insn); note; note = next)
{
|| GET_CODE (XEXP (note, 0)) != REG)
continue;
- if (set && reg_overlap_mentioned_p (SET_DEST (set), XEXP (note, 0)))
- set = NULL_RTX;
-
- delete_prior_computation (note, insn);
- }
-
- /* The REG_DEAD note may have been omitted for a register
- which is both set and used by the insn. */
- if (set
- && GET_CODE (SET_DEST (set)) == REG
- && reg_mentioned_p (SET_DEST (set), SET_SRC (set)))
- {
- note = gen_rtx_EXPR_LIST (REG_DEAD, SET_DEST (set), NULL_RTX);
delete_prior_computation (note, insn);
}
if (was_code_label)
remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
- /* Don't delete user-declared labels. Convert them to special NOTEs
- instead. */
- if (was_code_label && LABEL_NAME (insn) != 0
- && optimize && ! dont_really_delete)
+ /* Don't delete user-declared labels. When optimizing, convert them
+ to special NOTEs instead. When not optimizing, leave them alone. */
+ if (was_code_label && LABEL_NAME (insn) != 0)
{
- PUT_CODE (insn, NOTE);
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
- NOTE_SOURCE_FILE (insn) = 0;
- dont_really_delete = 1;
+ if (! optimize)
+ dont_really_delete = 1;
+ else if (! dont_really_delete)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
+ NOTE_SOURCE_FILE (insn) = 0;
+ dont_really_delete = 1;
+ }
}
else
/* Mark this insn as deleted. */
/* Patch out INSN (and the barrier if any) */
- if (optimize && ! dont_really_delete)
+ if (! dont_really_delete)
{
if (prev)
{
is also an unconditional jump in that case. */
}
\f
+/* We have determined that INSN is never reached, and are about to
+ delete it. Print a warning if the user asked for one.
+
+ To try to make this warning more useful, this should only be called
+ once per basic block not reached, and it only warns when the basic
+ block contains more than one line from the current function, and
+ contains at least one operation. CSE and inlining can duplicate insns,
+ so it's possible to get spurious warnings from this. */
+
+void
+never_reached_warning (avoided_insn)
+ rtx avoided_insn;
+{
+ rtx insn;
+ rtx a_line_note = NULL;
+ int two_avoided_lines = 0;
+ int contains_insn = 0;
+
+ if (! warn_notreached)
+ return;
+
+ /* Scan forwards, looking at LINE_NUMBER notes, until
+ we hit a LABEL or we run out of insns. */
+
+ for (insn = avoided_insn; insn != NULL; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ else if (GET_CODE (insn) == NOTE /* A line number note? */
+ && NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ if (a_line_note == NULL)
+ a_line_note = insn;
+ else
+ two_avoided_lines |= (NOTE_LINE_NUMBER (a_line_note)
+ != NOTE_LINE_NUMBER (insn));
+ }
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ contains_insn = 1;
+ }
+ if (two_avoided_lines && contains_insn)
+ warning_with_file_and_line (NOTE_SOURCE_FILE (a_line_note),
+ NOTE_LINE_NUMBER (a_line_note),
+ "will never be executed");
+}
+\f
/* Invert the condition of the jump JUMP, and make it jump
to label NLABEL instead of where it jumps now. */
{
register RTX_CODE code;
register int i;
- register char *fmt;
+ register const char *fmt;
code = GET_CODE (x);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- if (! invert_exp (XEXP (x, i), insn))
- return 0;
- if (fmt[i] == 'E')
+ {
+ if (! invert_exp (XEXP (x, i), insn))
+ return 0;
+ }
+ else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
if (nlabel)
++LABEL_NUSES (nlabel);
+ /* If we're eliding the jump over exception cleanups at the end of a
+ function, move the function end note so that -Wreturn-type works. */
+ if (olabel && NEXT_INSN (olabel)
+ && GET_CODE (NEXT_INSN (olabel)) == NOTE
+ && NOTE_LINE_NUMBER (NEXT_INSN (olabel)) == NOTE_INSN_FUNCTION_END)
+ emit_note_after (NOTE_INSN_FUNCTION_END, nlabel);
+
if (olabel && --LABEL_NUSES (olabel) == 0)
delete_insn (olabel);
register rtx x = *loc;
register RTX_CODE code = GET_CODE (x);
register int i;
- register char *fmt;
+ register const char *fmt;
if (code == LABEL_REF)
{
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
- return 0;
- if (fmt[i] == 'E')
+ {
+ if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
+ return 0;
+ }
+ else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
{
register int i;
register RTX_CODE code = GET_CODE (x);
- register char *fmt;
+ register const char *fmt;
if (x == y)
return 1;
branch and the second branch. It marks any changed registers. */
static void
-mark_modified_reg (dest, x)
+mark_modified_reg (dest, x, data)
rtx dest;
rtx x ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
- int regno, i;
+ int regno;
+ unsigned int i;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
int *all_reset;
/* Allocate register tables and quick-reset table. */
- modified_regs = (char *) alloca (max_reg * sizeof (char));
- same_regs = (int *) alloca (max_reg * sizeof (int));
- all_reset = (int *) alloca (max_reg * sizeof (int));
+ modified_regs = (char *) xmalloc (max_reg * sizeof (char));
+ same_regs = (int *) xmalloc (max_reg * sizeof (int));
+ all_reset = (int *) xmalloc (max_reg * sizeof (int));
for (i = 0; i < max_reg; i++)
all_reset[i] = -1;
modified_regs[i] = 1;
}
- note_stores (PATTERN (b2), mark_modified_reg);
+ note_stores (PATTERN (b2), mark_modified_reg, NULL);
}
/* Check the next candidate branch insn from the label
if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
&& rtx_equal_for_thread_p (b1op1, b2op1, b2)
&& (comparison_dominates_p (code1, code2)
- || (comparison_dominates_p (code1, reverse_condition (code2))
- && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
- 0),
- b1))))
+ || (can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
+ 0),
+ b1)
+ && comparison_dominates_p (code1, reverse_condition (code2)))))
+
{
t1 = prev_nonnote_insn (b1);
t2 = prev_nonnote_insn (b2);
}
}
}
+
+ /* Clean up. */
+ free (modified_regs);
+ free (same_regs);
+ free (all_reset);
}
\f
/* This is like RTX_EQUAL_P except that it knows about our handling of
register int i;
register int j;
register enum rtx_code code;
- register char *fmt;
+ register const char *fmt;
code = GET_CODE (x);
/* Rtx's of different codes cannot be equal. */
return 1;
}
else
- return (same_regs[REGNO (x)] == REGNO (y));
+ return (same_regs[REGNO (x)] == (int) REGNO (y));
break;
if (GET_CODE (SET_DEST (x)) == REG
&& GET_CODE (SET_DEST (y)) == REG)
{
- if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
+ if (same_regs[REGNO (SET_DEST (x))] == (int) REGNO (SET_DEST (y)))
{
same_regs[REGNO (SET_DEST (x))] = -1;
num_same_regs--;
break;
case '0':
+ case 't':
break;
/* It is believed that rtx's at this level will never
}
\f
-#ifndef HAVE_cc0
+#if !defined(HAVE_cc0) && !defined(HAVE_conditional_arithmetic)
/* Return the insn that NEW can be safely inserted in front of starting at
the jump insn INSN. Return 0 if it is not safe to do this jump
optimization. Note that NEW must contain a single set. */