/* Optimize jump instructions, for GNU compiler.
- Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
+ 1998, 1999, 2000 Free Software Foundation, Inc.
This file is part of GNU CC.
static int cross_jump_death_matters = 0;
-static int init_label_info PROTO((rtx));
-static void delete_barrier_successors PROTO((rtx));
-static void mark_all_labels PROTO((rtx, int));
-static rtx delete_unreferenced_labels PROTO((rtx));
-static void delete_noop_moves PROTO((rtx));
-static int calculate_can_reach_end PROTO((rtx, int, int));
-static int duplicate_loop_exit_test PROTO((rtx));
-static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
-static void do_cross_jump PROTO((rtx, rtx, rtx));
-static int jump_back_p PROTO((rtx, rtx));
-static int tension_vector_labels PROTO((rtx, int));
-static void mark_jump_label PROTO((rtx, rtx, int));
-static void delete_computation PROTO((rtx));
-static void delete_from_jump_chain PROTO((rtx));
-static int delete_labelref_insn PROTO((rtx, rtx, int));
-static void mark_modified_reg PROTO((rtx, rtx));
-static void redirect_tablejump PROTO((rtx, rtx));
-static void jump_optimize_1 PROTO ((rtx, int, int, int, int));
+static int init_label_info PARAMS ((rtx));
+static void delete_barrier_successors PARAMS ((rtx));
+static void mark_all_labels PARAMS ((rtx, int));
+static rtx delete_unreferenced_labels PARAMS ((rtx));
+static void delete_noop_moves PARAMS ((rtx));
+static int calculate_can_reach_end PARAMS ((rtx, int));
+static int duplicate_loop_exit_test PARAMS ((rtx));
+static void find_cross_jump PARAMS ((rtx, rtx, int, rtx *, rtx *));
+static void do_cross_jump PARAMS ((rtx, rtx, rtx));
+static int jump_back_p PARAMS ((rtx, rtx));
+static int tension_vector_labels PARAMS ((rtx, int));
+static void mark_jump_label PARAMS ((rtx, rtx, int, int));
+static void delete_computation PARAMS ((rtx));
+static void delete_from_jump_chain PARAMS ((rtx));
+static int delete_labelref_insn PARAMS ((rtx, rtx, int));
+static void mark_modified_reg PARAMS ((rtx, rtx, void *));
+static void redirect_tablejump PARAMS ((rtx, rtx));
+static void jump_optimize_1 PARAMS ((rtx, int, int, int, int, int));
#if ! defined(HAVE_cc0) && ! defined(HAVE_conditional_arithmetic)
-static rtx find_insert_position PROTO((rtx, rtx));
+static rtx find_insert_position PARAMS ((rtx, rtx));
#endif
-static int returnjump_p_1 PROTO((rtx *, void *));
-static void delete_prior_computation PROTO((rtx, rtx));
-
+static int returnjump_p_1 PARAMS ((rtx *, void *));
+static void delete_prior_computation PARAMS ((rtx, rtx));
+\f
/* Main external entry point into the jump optimizer. See comments before
jump_optimize_1 for descriptions of the arguments. */
void
int noop_moves;
int after_regscan;
{
- jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0);
+ jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, 0, 0);
}
/* Alternate entry into the jump optimizer. This entry point only rebuilds
rebuild_jump_labels (f)
rtx f;
{
- jump_optimize_1 (f, 0, 0, 0, 1);
+ jump_optimize_1 (f, 0, 0, 0, 1, 0);
}
+/* Alternate entry into the jump optimizer. Do only trivial optimizations. */
+void
+jump_optimize_minimal (f)
+ rtx f;
+{
+ jump_optimize_1 (f, 0, 0, 0, 0, 1);
+}
\f
/* Delete no-op jumps and optimize jumps to jumps
and jumps around jumps.
just determine whether control drops off the end of the function.
This case occurs when we have -W and not -O.
It works because `delete_insn' checks the value of `optimize'
- and refrains from actually deleting when that is 0. */
+ and refrains from actually deleting when that is 0.
+
+ If MINIMAL is nonzero, then we only perform trivial optimizations:
+
+ * Removal of unreachable code after BARRIERs.
+ * Removal of unreferenced CODE_LABELs.
+ * Removal of a jump to the next instruction.
+ * Removal of a conditional jump followed by an unconditional jump
+ to the same target as the conditional jump.
+ * Simplify a conditional jump around an unconditional jump.
+ * Simplify a jump to a jump.
+ * Delete extraneous line number notes.
+ */
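+
+/* As an editorial illustration (not taken from the GCC sources), the
+   "conditional jump followed by an unconditional jump to the same target"
+   case above covers RTL of roughly this shape:
+
+     (jump_insn (set (pc) (if_then_else (eq ...) (label_ref 23) (pc))))
+     (jump_insn (set (pc) (label_ref 23)))
+
+   where the conditional branch decides nothing and can simply be deleted,
+   leaving only the unconditional jump.  */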
static void
-jump_optimize_1 (f, cross_jump, noop_moves, after_regscan, mark_labels_only)
+jump_optimize_1 (f, cross_jump, noop_moves, after_regscan,
+ mark_labels_only, minimal)
rtx f;
int cross_jump;
int noop_moves;
int after_regscan;
int mark_labels_only;
+ int minimal;
{
register rtx insn, next;
int changed;
if (flag_exceptions && cross_jump)
init_insn_eh_region (f, max_uid);
- delete_barrier_successors (f);
+ if (! mark_labels_only)
+ delete_barrier_successors (f);
/* Leave some extra room for labels and duplicate exit test insns
we make. */
max_jump_chain = max_uid * 14 / 10;
- jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
- bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));
+ jump_chain = (rtx *) xcalloc (max_jump_chain, sizeof (rtx));
mark_all_labels (f, cross_jump);
/* Quit now if we just wanted to rebuild the JUMP_LABEL and REG_LABEL
notes and recompute LABEL_NUSES. */
if (mark_labels_only)
- return;
+ goto end;
- exception_optimize ();
+ if (! minimal)
+ exception_optimize ();
last_insn = delete_unreferenced_labels (f);
- if (optimize == 0)
- {
- /* CAN_REACH_END is persistent for each function. Once set it should
- not be cleared. This is especially true for the case where we
- delete the NOTE_FUNCTION_END note. CAN_REACH_END is cleared by
- the front-end before compiling each function. */
- if (calculate_can_reach_end (last_insn, 1, 0))
- can_reach_end = 1;
-
- /* Zero the "deleted" flag of all the "deleted" insns. */
- for (insn = f; insn; insn = NEXT_INSN (insn))
- INSN_DELETED_P (insn) = 0;
-
- /* Show that the jump chain is not valid. */
- jump_chain = 0;
- return;
- }
-
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
if (noop_moves)
delete_noop_moves (f);
This helps some of the optimizations below by having fewer insns
being jumped around. */
- if (! reload_completed && after_regscan)
+ if (optimize && ! reload_completed && after_regscan)
for (insn = f; insn; insn = next)
{
rtx set = single_set (insn);
for (insn = f; insn; insn = next)
{
rtx reallabelprev;
- rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
+ rtx temp, temp1, temp2 = NULL_RTX, temp3, temp4, temp5, temp6;
rtx nlabel;
int this_is_simplejump, this_is_condjump, reversep = 0;
int this_is_condjump_in_parallel;
if (nlabel != JUMP_LABEL (insn))
changed |= redirect_jump (insn, nlabel);
+ if (! optimize || minimal)
+ continue;
+
/* If a dispatch table always goes to the same place,
get rid of it and replace the insn that uses it. */
int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
int len = XVECLEN (pat, diff_vec_p);
rtx dispatch = prev_real_insn (insn);
+ rtx set;
for (i = 0; i < len; i++)
if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
!= XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
break;
+
if (i == len
&& dispatch != 0
&& GET_CODE (dispatch) == JUMP_INSN
&& JUMP_LABEL (dispatch) != 0
- /* Don't mess with a casesi insn. */
- && !(GET_CODE (PATTERN (dispatch)) == SET
- && (GET_CODE (SET_SRC (PATTERN (dispatch)))
- == IF_THEN_ELSE))
+ /* Don't mess with a casesi insn.
+ XXX according to the comment before computed_jump_p(),
+ all casesi insns should be a parallel of the jump
+ and a USE of a LABEL_REF. */
+ && ! ((set = single_set (dispatch)) != NULL
+ && (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE))
&& next_real_insn (JUMP_LABEL (dispatch)) == insn)
{
redirect_tablejump (dispatch,
#endif /* HAVE_cc0 */
#ifdef HAVE_conditional_arithmetic
+ /* ??? This is disabled in genconfig, as this simple-minded
+ transformation can incredibly lengthen register lifetimes.
+
+ Consider this example from cexp.c's yyparse:
+
+ 234 (set (pc)
+ (if_then_else (ne (reg:DI 149) (const_int 0 [0x0]))
+ (label_ref 248) (pc)))
+ 237 (set (reg/i:DI 0 $0) (const_int 1 [0x1]))
+ 239 (set (pc) (label_ref 2382))
+ 248 (code_label ("yybackup"))
+
+ This will be transformed to:
+
+ 237 (set (reg/i:DI 0 $0)
+ (if_then_else:DI (eq (reg:DI 149) (const_int 0 [0x0]))
+ (const_int 1 [0x1]) (reg/i:DI 0 $0)))
+ 239 (set (pc)
+ (if_then_else (eq (reg:DI 149) (const_int 0 [0x0]))
+ (label_ref 2382) (pc)))
+
+ which, from this narrow viewpoint, looks fine.  Except that
+ between this and 3 other occurrences of the same pattern, $0
+ is now live for basically the entire function, and we'll
+ get an abort in caller_save.
+
+ Any replacement for this code should recall that a set of
+ a register that is not live need not, and indeed should not,
+ be conditionalized. Either that, or delay the transformation
+ until after register allocation. */
+
/* See if this is a conditional jump around a small number of
instructions that we can conditionalize. Don't do this before
the initial CSE pass or after reload.
CALL_INSN, which some machines, such as the ARC, can do, but
this is a very minor optimization. */
if (this_is_condjump && ! this_is_simplejump
- && cse_not_expected && optimize > 0 && ! reload_completed
+ && cse_not_expected && ! reload_completed
&& BRANCH_COST > 2
&& can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (insn)), 0),
insn))
need to remove the BARRIER if we succeed. We can only
have one such jump since there must be a label after
the BARRIER and it's either ours, in which case it's the
- only one or some other, in which case we'd fail. */
+ only one, or some other, in which case we'd fail.
+ Likewise if it's a CALL_INSN followed by a BARRIER. */
- if (simplejump_p (temp1))
- changed_jump = temp1;
+ if (simplejump_p (temp1)
+ || (GET_CODE (temp1) == CALL_INSN
+ && NEXT_INSN (temp1) != 0
+ && GET_CODE (NEXT_INSN (temp1)) == BARRIER))
+ {
+ if (changed_jump == 0)
+ changed_jump = temp1;
+ else
+ changed_jump
+ = gen_rtx_INSN_LIST (VOIDmode, temp1, changed_jump);
+ }
/* See if we are allowed another insn and if this insn
is one we think we may be able to handle.  */
if (++num_insns > BRANCH_COST
|| last_insn
- || (temp2 = single_set (temp1)) == 0
- || side_effects_p (SET_SRC (temp2))
- || may_trap_p (SET_SRC (temp2)))
- failed = 1;
- else
+ || (((temp2 = single_set (temp1)) == 0
+ || side_effects_p (SET_SRC (temp2))
+ || may_trap_p (SET_SRC (temp2)))
+ && GET_CODE (temp1) != CALL_INSN))
+ failed = 1;
+ else if (temp2 != 0)
validate_change (temp1, &SET_SRC (temp2),
gen_rtx_IF_THEN_ELSE
(GET_MODE (SET_DEST (temp2)),
copy_rtx (ourcond),
SET_SRC (temp2), SET_DEST (temp2)),
1);
+ else
+ {
+ /* This is a CALL_INSN that doesn't have a SET. */
+ rtx *call_loc = &PATTERN (temp1);
+
+ if (GET_CODE (*call_loc) == PARALLEL)
+ call_loc = &XVECEXP (*call_loc, 0, 0);
+
+ validate_change (temp1, call_loc,
+ gen_rtx_IF_THEN_ELSE
+ (VOIDmode, copy_rtx (ourcond),
+ *call_loc, const0_rtx),
+ 1);
+ }
+
if (modified_in_p (ourcond, temp1))
last_insn = 1;
if (changed_jump != 0)
{
- if (GET_CODE (NEXT_INSN (changed_jump)) != BARRIER)
- abort ();
+ while (GET_CODE (changed_jump) == INSN_LIST)
+ {
+ delete_barrier (NEXT_INSN (XEXP (changed_jump, 0)));
+ changed_jump = XEXP (changed_jump, 1);
+ }
- delete_insn (NEXT_INSN (changed_jump));
+ delete_barrier (NEXT_INSN (changed_jump));
}
delete_insn (insn);
}
}
#endif
+ /* If branches are expensive, convert
+ if (foo) bar++; to bar += (foo != 0);
+ and similarly for "bar--;"
+
+ INSN is the conditional branch around the arithmetic. We set:
+
+ TEMP to the arithmetic insn.
+ TEMP1 to the SET doing the arithmetic.
+ TEMP2 to the operand being incremented or decremented.
+ TEMP3 to the condition being tested.
+ TEMP4 to the earliest insn used to find the condition. */
+
+ if ((BRANCH_COST >= 2
+#ifdef HAVE_incscc
+ || HAVE_incscc
+#endif
+#ifdef HAVE_decscc
+ || HAVE_decscc
+#endif
+ )
+ && ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && (temp = next_nonnote_insn (insn)) != 0
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1),
+ GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
+ && GET_CODE (SET_SRC (temp1)) == PLUS
+ && (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
+ && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
+ && ! side_effects_p (temp2)
+ && ! may_trap_p (temp2)
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp3 = next_active_insn (temp)) != 0
+ && simplejump_p (temp3)
+ && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
+ && (temp3 = get_condition (insn, &temp4)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp3, 0)) != BLKmode
+ && can_reverse_comparison_p (temp3, insn))
+ {
+ rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
+ enum rtx_code code = reverse_condition (GET_CODE (temp3));
+
+ start_sequence ();
+
+ /* It must be the case that TEMP2 is not modified in the range
+ [TEMP4, INSN). The one exception we make is if the insn
+ before INSN sets TEMP2 to something which is also unchanged
+ in that range. In that case, we can move the initialization
+ into our sequence. */
+
+ if ((temp5 = prev_active_insn (insn)) != 0
+ && no_labels_between_p (temp5, insn)
+ && GET_CODE (temp5) == INSN
+ && (temp6 = single_set (temp5)) != 0
+ && rtx_equal_p (temp2, SET_DEST (temp6))
+ && (CONSTANT_P (SET_SRC (temp6))
+ || GET_CODE (SET_SRC (temp6)) == REG
+ || GET_CODE (SET_SRC (temp6)) == SUBREG))
+ {
+ emit_insn (PATTERN (temp5));
+ init_insn = temp5;
+ init = SET_SRC (temp6);
+ }
+
+ if (CONSTANT_P (init)
+ || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
+ XEXP (temp3, 0), XEXP (temp3, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GTU || code == GEU), 1);
+
+ /* If we can do the store-flag, do the addition or
+ subtraction. */
+
+ if (target)
+ target = expand_binop (GET_MODE (temp2),
+ (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ ? add_optab : sub_optab),
+ temp2, target, temp2, 0, OPTAB_WIDEN);
+
+ if (target != 0)
+ {
+ /* Put the result back in temp2 in case it isn't already.
+ Then replace the jump, possibly a CC0-setting insn in
+ front of the jump, and TEMP, with the sequence we have
+ made. */
+
+ if (target != temp2)
+ emit_move_insn (temp2, target);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, temp4);
+ delete_insn (temp);
+
+ if (init_insn)
+ delete_insn (init_insn);
+
+ next = NEXT_INSN (insn);
+#ifdef HAVE_cc0
+ delete_insn (prev_nonnote_insn (insn));
+#endif
+ delete_insn (insn);
+
+ if (after_regscan)
+ {
+ reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
+ old_max_reg = max_reg_num ();
+ }
+
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
/* Try to use a conditional move (if the target has them), or a
store-flag insn. If the target has conditional arithmetic as
insn? After all, we're going to delete it. We'd have
to modify emit_conditional_move to take a comparison rtx
instead or write a new function. */
- cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
+
/* We want the target to be able to simplify comparisons with
zero (and maybe other constants as well), so don't create
pseudos for them. There's no need to either. */
+ if (GET_CODE (XEXP (temp4, 0)) == CONST_INT
+ || GET_CODE (XEXP (temp4, 0)) == CONST_DOUBLE)
+ cond0 = XEXP (temp4, 0);
+ else
+ cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
+
if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
|| GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
cond1 = XEXP (temp4, 1);
start_sequence ();
- /* If we're not dealing with a register or the insn is more
- complex than a simple SET, duplicate the computation and
- replace the destination with a new temporary. */
- if (register_operand (temp2, GET_MODE (var))
- && GET_CODE (PATTERN (temp)) == SET)
+ /* We're dealing with a single_set insn with no side effects
+ on SET_SRC.  We do need to be reasonably certain that, if
+ we need to force BVAL into a register, we won't
+ clobber the flags -- general_operand should suffice. */
+ if (general_operand (temp2, GET_MODE (var)))
bval = temp2;
else
{
SET_DEST (temp6) = bval;
emit_insn (PATTERN (new_insn));
}
-
+
target = emit_conditional_move (var, code,
cond0, cond1, VOIDmode,
aval, bval, GET_MODE (var),
5) if (...) x = b; if jumps are even more expensive. */
if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
+ /* We will be passing this as an operand to expand_and.  No
+ good if it's not valid as an operand. */
+ && general_operand (temp2, GET_MODE (temp2))
&& ((GET_CODE (temp3) == CONST_INT)
/* Make the latter case look like
x = x; if (...) x = 0; */
}
}
- /* If branches are expensive, convert
- if (foo) bar++; to bar += (foo != 0);
- and similarly for "bar--;"
-
- INSN is the conditional branch around the arithmetic. We set:
-
- TEMP is the arithmetic insn.
- TEMP1 is the SET doing the arithmetic.
- TEMP2 is the operand being incremented or decremented.
- TEMP3 to the condition being tested.
- TEMP4 to the earliest insn used to find the condition. */
-
- if ((BRANCH_COST >= 2
-#ifdef HAVE_incscc
- || HAVE_incscc
-#endif
-#ifdef HAVE_decscc
- || HAVE_decscc
-#endif
- )
- && ! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && (temp = next_nonnote_insn (insn)) != 0
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1),
- GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
- && GET_CODE (SET_SRC (temp1)) == PLUS
- && (XEXP (SET_SRC (temp1), 1) == const1_rtx
- || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
- && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
- && ! side_effects_p (temp2)
- && ! may_trap_p (temp2)
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp3 = next_active_insn (temp)) != 0
- && simplejump_p (temp3)
- && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
- && (temp3 = get_condition (insn, &temp4)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp3, 0)) != BLKmode
- && can_reverse_comparison_p (temp3, insn))
- {
- rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
- enum rtx_code code = reverse_condition (GET_CODE (temp3));
-
- start_sequence ();
-
- /* It must be the case that TEMP2 is not modified in the range
- [TEMP4, INSN). The one exception we make is if the insn
- before INSN sets TEMP2 to something which is also unchanged
- in that range. In that case, we can move the initialization
- into our sequence. */
-
- if ((temp5 = prev_active_insn (insn)) != 0
- && no_labels_between_p (temp5, insn)
- && GET_CODE (temp5) == INSN
- && (temp6 = single_set (temp5)) != 0
- && rtx_equal_p (temp2, SET_DEST (temp6))
- && (CONSTANT_P (SET_SRC (temp6))
- || GET_CODE (SET_SRC (temp6)) == REG
- || GET_CODE (SET_SRC (temp6)) == SUBREG))
- {
- emit_insn (PATTERN (temp5));
- init_insn = temp5;
- init = SET_SRC (temp6);
- }
-
- if (CONSTANT_P (init)
- || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
- target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
- XEXP (temp3, 0), XEXP (temp3, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GTU || code == GEU), 1);
-
- /* If we can do the store-flag, do the addition or
- subtraction. */
-
- if (target)
- target = expand_binop (GET_MODE (temp2),
- (XEXP (SET_SRC (temp1), 1) == const1_rtx
- ? add_optab : sub_optab),
- temp2, target, temp2, 0, OPTAB_WIDEN);
-
- if (target != 0)
- {
- /* Put the result back in temp2 in case it isn't already.
- Then replace the jump, possible a CC0-setting insn in
- front of the jump, and TEMP, with the sequence we have
- made. */
-
- if (target != temp2)
- emit_move_insn (temp2, target);
-
- seq = get_insns ();
- end_sequence ();
-
- emit_insns_before (seq, temp4);
- delete_insn (temp);
-
- if (init_insn)
- delete_insn (init_insn);
-
- next = NEXT_INSN (insn);
-#ifdef HAVE_cc0
- delete_insn (prev_nonnote_insn (insn));
-#endif
- delete_insn (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
/* Simplify if (...) x = 1; else {...} if (x) ...
We recognize this case scanning backwards as well.
#endif
else
{
- /* Detect a jump to a jump. */
-
- /* Look for if (foo) bar; else break; */
- /* The insns look like this:
- insn = condjump label1;
- ...range1 (some insns)...
- jump label2;
- label1:
- ...range2 (some insns)...
- jump somewhere unconditionally
- label2: */
- {
- rtx label1 = next_label (insn);
- rtx range1end = label1 ? prev_active_insn (label1) : 0;
- /* Don't do this optimization on the first round, so that
- jump-around-a-jump gets simplified before we ask here
- whether a jump is unconditional.
-
- Also don't do it when we are called after reload since
- it will confuse reorg. */
- if (! first
- && (reload_completed ? ! flag_delayed_branch : 1)
- /* Make sure INSN is something we can invert. */
- && condjump_p (insn)
- && label1 != 0
- && JUMP_LABEL (insn) == label1
- && LABEL_NUSES (label1) == 1
- && GET_CODE (range1end) == JUMP_INSN
- && simplejump_p (range1end))
- {
- rtx label2 = next_label (label1);
- rtx range2end = label2 ? prev_active_insn (label2) : 0;
- if (range1end != range2end
- && JUMP_LABEL (range1end) == label2
- && GET_CODE (range2end) == JUMP_INSN
- && GET_CODE (NEXT_INSN (range2end)) == BARRIER
- /* Invert the jump condition, so we
- still execute the same insns in each case. */
- && invert_jump (insn, label1))
- {
- rtx range1beg = next_active_insn (insn);
- rtx range2beg = next_active_insn (label1);
- rtx range1after, range2after;
- rtx range1before, range2before;
- rtx rangenext;
-
- /* Include in each range any notes before it, to be
- sure that we get the line number note if any, even
- if there are other notes here. */
- while (PREV_INSN (range1beg)
- && GET_CODE (PREV_INSN (range1beg)) == NOTE)
- range1beg = PREV_INSN (range1beg);
-
- while (PREV_INSN (range2beg)
- && GET_CODE (PREV_INSN (range2beg)) == NOTE)
- range2beg = PREV_INSN (range2beg);
-
- /* Don't move NOTEs for blocks or loops; shift them
- outside the ranges, where they'll stay put. */
- range1beg = squeeze_notes (range1beg, range1end);
- range2beg = squeeze_notes (range2beg, range2end);
-
- /* Get current surrounds of the 2 ranges. */
- range1before = PREV_INSN (range1beg);
- range2before = PREV_INSN (range2beg);
- range1after = NEXT_INSN (range1end);
- range2after = NEXT_INSN (range2end);
-
- /* Splice range2 where range1 was. */
- NEXT_INSN (range1before) = range2beg;
- PREV_INSN (range2beg) = range1before;
- NEXT_INSN (range2end) = range1after;
- PREV_INSN (range1after) = range2end;
- /* Splice range1 where range2 was. */
- NEXT_INSN (range2before) = range1beg;
- PREV_INSN (range1beg) = range2before;
- NEXT_INSN (range1end) = range2after;
- PREV_INSN (range2after) = range1end;
-
- /* Check for a loop end note between the end of
- range2, and the next code label. If there is one,
- then what we have really seen is
- if (foo) break; end_of_loop;
- and moved the break sequence outside the loop.
- We must move the LOOP_END note to where the
- loop really ends now, or we will confuse loop
- optimization. Stop if we find a LOOP_BEG note
- first, since we don't want to move the LOOP_END
- note in that case. */
- for (;range2after != label2; range2after = rangenext)
- {
- rangenext = NEXT_INSN (range2after);
- if (GET_CODE (range2after) == NOTE)
- {
- if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_END)
- {
- NEXT_INSN (PREV_INSN (range2after))
- = rangenext;
- PREV_INSN (rangenext)
- = PREV_INSN (range2after);
- PREV_INSN (range2after)
- = PREV_INSN (range1beg);
- NEXT_INSN (range2after) = range1beg;
- NEXT_INSN (PREV_INSN (range1beg))
- = range2after;
- PREV_INSN (range1beg) = range2after;
- }
- else if (NOTE_LINE_NUMBER (range2after)
- == NOTE_INSN_LOOP_BEG)
- break;
- }
- }
- changed = 1;
- continue;
- }
- }
- }
-
/* Now that the jump has been tensioned,
try cross jumping: check for identical code
before the jump and before its target label. */
}
}
-#ifdef HAVE_return
- if (HAVE_return)
- {
- /* If we fall through to the epilogue, see if we can insert a RETURN insn
- in front of it. If the machine allows it at this point (we might be
- after reload for a leaf routine), it will improve optimization for it
- to be there. We do this both here and at the start of this pass since
- the RETURN might have been deleted by some of our optimizations. */
- insn = get_last_insn ();
- while (insn && GET_CODE (insn) == NOTE)
- insn = PREV_INSN (insn);
-
- if (insn && GET_CODE (insn) != BARRIER)
- {
- emit_jump_insn (gen_return ());
- emit_barrier ();
- }
- }
-#endif
-
/* CAN_REACH_END is persistent for each function. Once set it should
not be cleared. This is especially true for the case where we
delete the NOTE_FUNCTION_END note. CAN_REACH_END is cleared by
the front-end before compiling each function. */
- if (calculate_can_reach_end (last_insn, 0, 1))
+ if (! minimal && calculate_can_reach_end (last_insn, optimize != 0))
can_reach_end = 1;
- /* Show JUMP_CHAIN no longer valid. */
+end:
+ /* Clean up. */
+ free (jump_chain);
jump_chain = 0;
}
\f
/* Delete insns following barriers, up to next label.
Also delete no-op jumps created by gcse. */
+
static void
delete_barrier_successors (f)
rtx f;
}
/* INSN is now the code_label. */
}
+
/* Also remove (set (pc) (pc)) insns which can be created by
gcse. We eliminate such insns now to avoid having them
cause problems later. */
for (insn = f; insn; insn = NEXT_INSN (insn))
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
{
- mark_jump_label (PATTERN (insn), insn, cross_jump);
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER)
+ {
+ mark_all_labels (XEXP (PATTERN (insn), 0), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 1), cross_jump);
+ mark_all_labels (XEXP (PATTERN (insn), 2), cross_jump);
+ continue;
+ }
+
+ mark_jump_label (PATTERN (insn), insn, cross_jump, 0);
if (! INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN)
{
if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
for (insn = f; insn; )
{
- if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
+ if (GET_CODE (insn) == CODE_LABEL
+ && LABEL_NUSES (insn) == 0
+ && LABEL_ALTERNATE_NAME (insn) == NULL)
insn = delete_insn (insn);
else
{
{
register rtx body = PATTERN (insn);
-/* Combine stack_adjusts with following push_insns. */
-#ifdef PUSH_ROUNDING
- if (GET_CODE (body) == SET
- && SET_DEST (body) == stack_pointer_rtx
- && GET_CODE (SET_SRC (body)) == PLUS
- && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
- && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
- && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
- {
- rtx p;
- rtx stack_adjust_insn = insn;
- int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
- int total_pushed = 0;
- int pushes = 0;
-
- /* Find all successive push insns. */
- p = insn;
- /* Don't convert more than three pushes;
- that starts adding too many displaced addresses
- and the whole thing starts becoming a losing
- proposition. */
- while (pushes < 3)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (p == 0 || GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- pushes++;
- if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
- > stack_adjust_amount)
- break;
- total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- }
-
- /* Discard the amount pushed from the stack adjust;
- maybe eliminate it entirely. */
- if (total_pushed >= stack_adjust_amount)
- {
- delete_computation (stack_adjust_insn);
- total_pushed = stack_adjust_amount;
- }
- else
- XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
- = GEN_INT (stack_adjust_amount - total_pushed);
-
- /* Change the appropriate push insns to ordinary stores. */
- p = insn;
- while (total_pushed > 0)
- {
- rtx pbody, dest;
- p = next_nonnote_insn (p);
- if (GET_CODE (p) != INSN)
- break;
- pbody = PATTERN (p);
- if (GET_CODE (pbody) != SET)
- break;
- dest = SET_DEST (pbody);
- /* Allow a no-op move between the adjust and the push. */
- if (GET_CODE (dest) == REG
- && GET_CODE (SET_SRC (pbody)) == REG
- && REGNO (dest) == REGNO (SET_SRC (pbody)))
- continue;
- if (! (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == POST_INC
- && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
- break;
- total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
- /* If this push doesn't fully fit in the space
- of the stack adjust that we deleted,
- make another stack adjust here for what we
- didn't use up. There should be peepholes
- to recognize the resulting sequence of insns. */
- if (total_pushed < 0)
- {
- emit_insn_before (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (- total_pushed)),
- p);
- break;
- }
- XEXP (dest, 0)
- = plus_constant (stack_pointer_rtx, total_pushed);
- }
- }
-#endif
-
/* Detect and delete no-op move instructions
resulting from not allocating a parameter in a register. */
if we find it. */
static int
-calculate_can_reach_end (last, check_deleted, delete_final_note)
+calculate_can_reach_end (last, delete_final_note)
rtx last;
- int check_deleted;
int delete_final_note;
{
rtx insn = last;
/* See if we backed up to the appropriate type of note. */
if (insn != NULL_RTX
&& GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
- && (check_deleted == 0
- || ! INSN_DELETED_P (insn)))
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END)
{
if (delete_final_note)
delete_insn (insn);
remove_note (insn, p);
if (++num_insns > 20
|| find_reg_note (insn, REG_RETVAL, NULL_RTX)
- || find_reg_note (insn, REG_LIBCALL, NULL_RTX)
- || asm_noperands (PATTERN (insn)) > 0)
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
return 0;
break;
default:
/* We can do the replacement. Allocate reg_map if this is the
first replacement we found. */
if (reg_map == 0)
- {
- reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
- bzero ((char *) reg_map, max_reg * sizeof (rtx));
- }
+ reg_map = (rtx *) xcalloc (max_reg, sizeof (rtx));
REG_LOOP_TEST_P (reg) = 1;
break;
case INSN:
- copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ copy = emit_insn_before (copy_insn (PATTERN (insn)), loop_start);
if (reg_map)
replace_regs (PATTERN (copy), reg_map, max_reg, 1);
- mark_jump_label (PATTERN (copy), copy, 0);
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
/* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
make them. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) != REG_LABEL)
REG_NOTES (copy)
- = copy_rtx (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
+ = copy_insn_1 (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
XEXP (link, 0),
REG_NOTES (copy)));
if (reg_map && REG_NOTES (copy))
break;
case JUMP_INSN:
- copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ copy = emit_jump_insn_before (copy_insn (PATTERN (insn)), loop_start);
if (reg_map)
replace_regs (PATTERN (copy), reg_map, max_reg, 1);
- mark_jump_label (PATTERN (copy), copy, 0);
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
if (REG_NOTES (insn))
{
- REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
+ REG_NOTES (copy) = copy_insn_1 (REG_NOTES (insn));
if (reg_map)
replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
}
if (! first_copy)
first_copy = copy;
- mark_jump_label (PATTERN (copy), copy, 0);
+ mark_jump_label (PATTERN (copy), copy, 0, 0);
if (INSN_UID (copy) < max_jump_chain
&& INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
{
emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
delete_insn (next_nonnote_insn (loop_start));
+
+ /* Clean up. */
+ if (reg_map)
+ free (reg_map);
return 1;
}
&& GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
}
-/* Given an rtx-code for a comparison, return the code
- for the negated comparison.
- WATCH OUT! reverse_condition is not safe to use on a jump
- that might be acting on the results of an IEEE floating point comparison,
- because of the special treatment of non-signaling nans in comparisons.
+/* Given an rtx-code for a comparison, return the code for the negated
+ comparison. If no such code exists, return UNKNOWN.
+
+ WATCH OUT! reverse_condition is not safe to use on a jump that might
+ be acting on the results of an IEEE floating point comparison, because
+ of the special treatment of non-signaling nans in comparisons.
Use can_reverse_comparison_p to be sure. */
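+
+/* A short worked example (an editorial note, not from the GCC sources):
+   under IEEE semantics both (lt x y) and (ge x y) evaluate to false when
+   either operand is a NaN, so a branch on LT cannot blindly be rewritten
+   as a branch on GE -- the two disagree for unordered operands.  That is
+   the hazard can_reverse_comparison_p is meant to catch.  */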
enum rtx_code
{
case EQ:
return NE;
-
case NE:
return EQ;
-
case GT:
return LE;
-
case GE:
return LT;
-
case LT:
return GE;
-
case LE:
return GT;
-
case GTU:
return LEU;
-
case GEU:
return LTU;
-
case LTU:
return GEU;
+ case LEU:
+ return GTU;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return UNKNOWN;
+ default:
+ abort ();
+ }
+}
+
+/* Similar, but we're allowed to generate unordered comparisons, which
+ makes it safe for IEEE floating-point. Of course, we have to recognize
+ that the target will support them too... */
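+
+/* For instance (an editorial note, not from the GCC sources), GT reverses
+   to UNLE below: "unordered or less-or-equal" holds in exactly the cases
+   where GT does not, including when an operand is a NaN, so the reversal
+   is IEEE-safe provided the target can emit the unordered comparison.  */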
+
+enum rtx_code
+reverse_condition_maybe_unordered (code)
+ enum rtx_code code;
+{
+ /* Non-IEEE formats don't have unordered conditions. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
+ return reverse_condition (code);
+
+ switch (code)
+ {
+ case EQ:
+ return NE;
+ case NE:
+ return EQ;
+ case GT:
+ return UNLE;
+ case GE:
+ return UNLT;
+ case LT:
+ return UNGE;
+ case LE:
+ return UNGT;
+ case LTGT:
+ return UNEQ;
+ case GTU:
+ return LEU;
+ case GEU:
+ return LTU;
+ case LTU:
+ return GEU;
case LEU:
return GTU;
+ case UNORDERED:
+ return ORDERED;
+ case ORDERED:
+ return UNORDERED;
+ case UNLT:
+ return GE;
+ case UNLE:
+ return GT;
+ case UNGT:
+ return LE;
+ case UNGE:
+ return LT;
+ case UNEQ:
+ return LTGT;
default:
abort ();
- return UNKNOWN;
}
}
{
case EQ:
case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNEQ:
+ case LTGT:
return code;
case GT:
return LT;
-
case GE:
return LE;
-
case LT:
return GT;
-
case LE:
return GE;
-
case GTU:
return LTU;
-
case GEU:
return LEU;
-
case LTU:
return GTU;
-
case LEU:
return GEU;
+ case UNLT:
+ return UNGT;
+ case UNLE:
+ return UNGE;
+ case UNGT:
+ return UNLT;
+ case UNGE:
+ return UNLE;
default:
abort ();
- return UNKNOWN;
}
}
case GT:
return GTU;
-
case GE:
return GEU;
-
case LT:
return LTU;
-
case LE:
return LEU;
case GTU:
return GT;
-
case GEU:
return GE;
-
case LTU:
return LT;
-
case LEU:
return LE;
switch (code1)
{
case EQ:
- if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
+ if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU
+ || code2 == ORDERED)
return 1;
break;
case LT:
- if (code2 == LE || code2 == NE)
+ if (code2 == LE || code2 == NE || code2 == ORDERED)
return 1;
break;
case GT:
- if (code2 == GE || code2 == NE)
+ if (code2 == GE || code2 == NE || code2 == ORDERED)
+ return 1;
+ break;
+
+ case GE:
+ case LE:
+ if (code2 == ORDERED)
+ return 1;
+ break;
+
+ case LTGT:
+ if (code2 == NE || code2 == ORDERED)
return 1;
break;
if (code2 == GEU || code2 == NE)
return 1;
break;
+
+ case UNORDERED:
+ if (code2 == NE)
+ return 1;
+ break;
default:
break;
void *data ATTRIBUTE_UNUSED;
{
rtx x = *loc;
- return GET_CODE (x) == RETURN;
+ return x && GET_CODE (x) == RETURN;
}
int
two labels distinct if they are separated by only USE or CLOBBER insns. */
static void
-mark_jump_label (x, insn, cross_jump)
+mark_jump_label (x, insn, cross_jump, in_mem)
register rtx x;
rtx insn;
int cross_jump;
+ int in_mem;
{
register RTX_CODE code = GET_CODE (x);
register int i;
case REG:
case SUBREG:
case CONST_INT:
- case SYMBOL_REF:
case CONST_DOUBLE:
case CLOBBER:
case CALL:
return;
case MEM:
+ in_mem = 1;
+ break;
+
+ case SYMBOL_REF:
+ if (!in_mem)
+ return;
+
/* If this is a constant-pool reference, see if it is a label. */
- if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
- mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ mark_jump_label (get_pool_constant (x), insn, cross_jump, in_mem);
break;
case LABEL_REF:
int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
for (i = 0; i < XVECLEN (x, eltnum); i++)
- mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX,
+ cross_jump, in_mem);
}
return;
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- mark_jump_label (XEXP (x, i), insn, cross_jump);
+ mark_jump_label (XEXP (x, i), insn, cross_jump, in_mem);
else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
- mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
+ mark_jump_label (XVECEXP (x, i, j), insn, cross_jump, in_mem);
}
}
}
delete_computation (insn);
}
+/* Verify INSN is a BARRIER and delete it. */
+
+void
+delete_barrier (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) != BARRIER)
+ abort ();
+
+ delete_insn (insn);
+}
+
/* Recursively delete prior insns that compute the value (used only by INSN
which the caller is deleting) stored in the register mentioned by NOTE
which is a REG_DEAD note associated with INSN. */
if (was_code_label)
remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
- /* Don't delete user-declared labels. Convert them to special NOTEs
- instead. */
- if (was_code_label && LABEL_NAME (insn) != 0
- && optimize && ! dont_really_delete)
+ /* Don't delete user-declared labels. When optimizing, convert them
+ to special NOTEs instead. When not optimizing, leave them alone. */
+ if (was_code_label && LABEL_NAME (insn) != 0)
{
- PUT_CODE (insn, NOTE);
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
- NOTE_SOURCE_FILE (insn) = 0;
- dont_really_delete = 1;
+ if (! optimize)
+ dont_really_delete = 1;
+ else if (! dont_really_delete)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
+ NOTE_SOURCE_FILE (insn) = 0;
+ dont_really_delete = 1;
+ }
}
else
/* Mark this insn as deleted. */
/* Patch out INSN (and the barrier if any) */
- if (optimize && ! dont_really_delete)
+ if (! dont_really_delete)
{
if (prev)
{
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- if (! invert_exp (XEXP (x, i), insn))
- return 0;
- if (fmt[i] == 'E')
+ {
+ if (! invert_exp (XEXP (x, i), insn))
+ return 0;
+ }
+ else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
if (nlabel)
++LABEL_NUSES (nlabel);
+ /* If we're eliding the jump over exception cleanups at the end of a
+ function, move the function end note so that -Wreturn-type works. */
+ if (olabel && NEXT_INSN (olabel)
+ && GET_CODE (NEXT_INSN (olabel)) == NOTE
+ && NOTE_LINE_NUMBER (NEXT_INSN (olabel)) == NOTE_INSN_FUNCTION_END)
+ emit_note_after (NOTE_INSN_FUNCTION_END, nlabel);
+
if (olabel && --LABEL_NUSES (olabel) == 0)
delete_insn (olabel);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
- return 0;
- if (fmt[i] == 'E')
+ {
+ if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
+ return 0;
+ }
+ else if (fmt[i] == 'E')
{
register int j;
for (j = 0; j < XVECLEN (x, i); j++)
branch and the second branch. It marks any changed registers. */
static void
-mark_modified_reg (dest, x)
+mark_modified_reg (dest, x, data)
rtx dest;
rtx x ATTRIBUTE_UNUSED;
+ void *data ATTRIBUTE_UNUSED;
{
- int regno, i;
+ int regno;
+ unsigned int i;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
int *all_reset;
/* Allocate register tables and quick-reset table. */
- modified_regs = (char *) alloca (max_reg * sizeof (char));
- same_regs = (int *) alloca (max_reg * sizeof (int));
- all_reset = (int *) alloca (max_reg * sizeof (int));
+ modified_regs = (char *) xmalloc (max_reg * sizeof (char));
+ same_regs = (int *) xmalloc (max_reg * sizeof (int));
+ all_reset = (int *) xmalloc (max_reg * sizeof (int));
for (i = 0; i < max_reg; i++)
all_reset[i] = -1;
modified_regs[i] = 1;
}
- note_stores (PATTERN (b2), mark_modified_reg);
+ note_stores (PATTERN (b2), mark_modified_reg, NULL);
}
/* Check the next candidate branch insn from the label
if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
&& rtx_equal_for_thread_p (b1op1, b2op1, b2)
&& (comparison_dominates_p (code1, code2)
- || (comparison_dominates_p (code1, reverse_condition (code2))
- && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
- 0),
- b1))))
+ || (can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (b1)),
+ 0),
+ b1)
+ && comparison_dominates_p (code1, reverse_condition (code2)))))
+
{
t1 = prev_nonnote_insn (b1);
t2 = prev_nonnote_insn (b2);
}
}
}
+
+ /* Clean up. */
+ free (modified_regs);
+ free (same_regs);
+ free (all_reset);
}
\f
/* This is like RTX_EQUAL_P except that it knows about our handling of
return 1;
}
else
- return (same_regs[REGNO (x)] == REGNO (y));
+ return (same_regs[REGNO (x)] == (int) REGNO (y));
break;
if (GET_CODE (SET_DEST (x)) == REG
&& GET_CODE (SET_DEST (y)) == REG)
{
- if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
+ if (same_regs[REGNO (SET_DEST (x))] == (int) REGNO (SET_DEST (y)))
{
same_regs[REGNO (SET_DEST (x))] = -1;
num_same_regs--;