/* If-conversion support.
- Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
This file is part of GCC.
/* Forward references. */
static int count_bb_insns (basic_block);
-static int total_bb_rtx_cost (basic_block);
+static bool cheap_bb_rtx_cost_p (basic_block, int);
static rtx first_active_insn (basic_block);
static rtx last_active_insn (basic_block, int);
static basic_block block_fallthru (basic_block);
return count;
}
-/* Count the total insn_rtx_cost of non-jump active insns in BB.
- This function returns -1, if the cost of any instruction could
- not be estimated. */
+/* Determine whether the total insn_rtx_cost on non-jump insns in
+ basic block BB is less than MAX_COST. This function returns
+ false if the cost of any instruction could not be estimated. */
-static int
-total_bb_rtx_cost (basic_block bb)
+static bool
+cheap_bb_rtx_cost_p (basic_block bb, int max_cost)
{
int count = 0;
rtx insn = BB_HEAD (bb);
{
int cost = insn_rtx_cost (PATTERN (insn));
if (cost == 0)
- return -1;
+ return false;
+
+ /* If this instruction is the load or set of a "stack" register,
+ such as a floating point register on x87, then the cost of
+ speculatively executing this instruction needs to include
+ the additional cost of popping this register off of the
+ register stack. */
+#ifdef STACK_REGS
+ {
+ rtx set = single_set (insn);
+ if (set && STACK_REG_P (SET_DEST (set)))
+ cost += COSTS_N_INSNS (1);
+ }
+#endif
+
count += cost;
+ if (count >= max_cost)
+ return false;
}
else if (CALL_P (insn))
- return -1;
+ return false;
if (insn == BB_END (bb))
break;
insn = NEXT_INSN (insn);
}
- return count;
+ return true;
}
/* Return the first non-jump active insn in the basic block. */
rtx a = if_info->a;
rtx b = if_info->b;
rtx x = if_info->x;
+ rtx orig_a, orig_b;
rtx insn_a, insn_b;
rtx tmp, target;
int is_mem = 0;
start_sequence ();
+ orig_a = a;
+ orig_b = b;
+
/* If either operand is complex, load it into a register first.
The best way to do this is to copy the original insn. In this
way we preserve any clobbers etc that the insn may have had.
}
if (! general_operand (b, GET_MODE (b)))
{
- rtx set;
+ rtx set, last;
if (no_new_pseudos)
goto end_seq_and_fail;
if (is_mem)
{
tmp = gen_reg_rtx (GET_MODE (b));
- tmp = emit_insn (gen_rtx_SET (VOIDmode,
- tmp,
- b));
+ tmp = gen_rtx_SET (VOIDmode, tmp, b);
}
else if (! insn_b)
goto end_seq_and_fail;
tmp = copy_rtx (insn_b);
set = single_set (tmp);
SET_DEST (set) = b;
- tmp = emit_insn (PATTERN (tmp));
+ tmp = PATTERN (tmp);
}
+
+ /* If insn to set up A clobbers any registers B depends on, try to
+ swap insn that sets up A with the one that sets up B. If even
+ that doesn't help, punt. */
+ last = get_last_insn ();
+ if (last && modified_in_p (orig_b, last))
+ {
+ tmp = emit_insn_before (tmp, get_insns ());
+ if (modified_in_p (orig_a, tmp))
+ goto end_seq_and_fail;
+ }
+ else
+ tmp = emit_insn (tmp);
+
if (recog_memoized (tmp) < 0)
goto end_seq_and_fail;
}
{
basic_block then_bb = then_edge->dest;
basic_block else_bb = else_edge->dest, new_bb;
- int then_bb_index, bb_cost;
+ int then_bb_index;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
test_bb->index, then_bb->index);
/* THEN is small. */
- bb_cost = total_bb_rtx_cost (then_bb);
- if (bb_cost < 0 || bb_cost >= COSTS_N_INSNS (BRANCH_COST))
+ if (! cheap_bb_rtx_cost_p (then_bb, COSTS_N_INSNS (BRANCH_COST)))
return FALSE;
/* Registers set are dead, or are predicable. */
else_bb->global_live_at_start,
then_bb->global_live_at_end);
- new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb), else_bb);
+
+ /* We can avoid creating a new basic block if then_bb is immediately
+ followed by else_bb, i.e. deleting then_bb allows test_bb to fall
+ thru to else_bb. */
+
+ if (then_bb->next_bb == else_bb
+ && then_bb->prev_bb == test_bb
+ && else_bb != EXIT_BLOCK_PTR)
+ {
+ redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
+ new_bb = 0;
+ }
+ else
+ new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
+ else_bb);
+
then_bb_index = then_bb->index;
delete_basic_block (then_bb);
basic_block then_bb = then_edge->dest;
basic_block else_bb = else_edge->dest;
edge else_succ;
- int bb_cost;
rtx note;
/* If we are partitioning hot/cold basic blocks, we don't want to
test_bb->index, else_bb->index);
/* ELSE is small. */
- bb_cost = total_bb_rtx_cost (else_bb);
- if (bb_cost < 0 || bb_cost >= COSTS_N_INSNS (BRANCH_COST))
+ if (! cheap_bb_rtx_cost_p (else_bb, COSTS_N_INSNS (BRANCH_COST)))
return FALSE;
/* Registers set are dead, or are predicable. */
TEST_SET = set of registers set between EARLIEST and the
end of the block. */
- tmp = OBSTACK_ALLOC_REG_SET (&reg_obstack);
- merge_set = OBSTACK_ALLOC_REG_SET (&reg_obstack);
- test_live = OBSTACK_ALLOC_REG_SET (&reg_obstack);
- test_set = OBSTACK_ALLOC_REG_SET (&reg_obstack);
+ tmp = ALLOC_REG_SET (&reg_obstack);
+ merge_set = ALLOC_REG_SET (&reg_obstack);
+ test_live = ALLOC_REG_SET (&reg_obstack);
+ test_set = ALLOC_REG_SET (&reg_obstack);
/* ??? bb->local_set is only valid during calculate_global_regs_live,
so we must recompute usage for MERGE_BB. Not so bad, I suppose,