/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
-#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#define CHECK (false)
#endif
+/* In deps->last_pending_memory_flush, this note kind marks JUMP_INSNs
+   that weren't added to the list because of flush_pending_lists: such
+   a jump stands just for itself and not for any other pending memory
+   reads/writes. */
+#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
+#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
+
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
/* The following instructions, which depend on a speculatively scheduled
instruction, cannot be speculatively scheduled along. */
{
- if (may_trap_p (PATTERN (insn)))
- /* If instruction might trap, it cannot be speculatively scheduled.
+ if (may_trap_or_fault_p (PATTERN (insn)))
+ /* If instruction might fault, it cannot be speculatively scheduled.
For control speculation it's obvious why and for data speculation
it's because the insn might get wrong input if speculation
wasn't successful. */
INSN_FORW_DEPS (insn) = create_deps_list ();
INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
- if (DEBUG_INSN_P (insn))
- DEBUG_INSN_SCHED_P (insn) = TRUE;
-
/* ??? It would be nice to allocate dependency caches here. */
}
{
/* ??? It would be nice to deallocate dependency caches here. */
- if (DEBUG_INSN_P (insn))
- {
- gcc_assert (DEBUG_INSN_SCHED_P (insn));
- DEBUG_INSN_SCHED_P (insn) = FALSE;
- }
-
free_deps_list (INSN_HARD_BACK_DEPS (insn));
INSN_HARD_BACK_DEPS (insn) = NULL;
{
rtx list, next;
- if (deps->readonly)
+ /* We don't want to short-circuit dependencies involving debug
+ insns, because they may cause actual dependencies to be
+ disregarded. */
+ if (deps->readonly || DEBUG_INSN_P (insn))
{
add_dependence_list (insn, *listp, uncond, dep_type);
return;
delete_all_dependences (insn);
- prev_nonnote = prev_nonnote_insn (insn);
- while (DEBUG_INSN_P (prev_nonnote))
- prev_nonnote = prev_nonnote_insn (prev_nonnote);
+ prev_nonnote = prev_nonnote_nondebug_insn (insn);
if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
&& ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
if (sched_deps_info->use_cselib)
{
mem = shallow_copy_rtx (mem);
- XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
+ XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
}
link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
if (clobber_p)
{
new_incr = reg_pressure_info[cl].clobber_increase + incr;
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
if (clobber_p)
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
reg_pressure_info[cl].change -= incr;
}
}
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
reg_pressure_info[cl].change -= 1;
}
}
/* Set up reg pressure info related to INSN. */
-static void
-setup_insn_reg_pressure_info (rtx insn)
+void
+init_insn_reg_pressure_info (rtx insn)
{
int i, len;
enum reg_class cl;
if (! INSN_P (insn))
return;
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
reg_pressure_info[cl].clobber_increase = 0;
reg_pressure_info[cl].set_increase = 0;
reg_pressure_info[cl].unused_set_increase = 0;
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
- len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
+ len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
pressure_info
= INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
- INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
+ INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
* sizeof (int), 1);
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
pressure_info[i].clobber_increase
= reg_pressure_info[cl].clobber_increase;
pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
/* Treat all writes to a stack register as modifying the TOS. */
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
- int nregs;
-
/* Avoid analyzing the same register twice. */
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
- nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
- while (--nregs >= 0)
- SET_HARD_REG_BIT (implicit_reg_pending_uses,
- FIRST_STACK_REG + nregs);
+ add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
+ FIRST_STACK_REG);
}
#endif
}
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
t = shallow_copy_rtx (dest);
- cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
}
t = canon_rtx (t);
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
t = shallow_copy_rtx (t);
- cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
}
if (!DEBUG_INSN_P (insn))
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
{
- if (! JUMP_P (XEXP (u, 0)))
+ if (! NON_FLUSH_JUMP_P (u))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
else if (deps_may_trap_p (x))
{
if (JUMP_P (insn))
{
rtx next;
- next = next_nonnote_insn (insn);
- while (next && DEBUG_INSN_P (next))
- next = next_nonnote_insn (next);
+ next = next_nonnote_nondebug_insn (insn);
if (next && BARRIER_P (next))
reg_pending_barrier = MOVE_BARRIER;
else
if (sched_pressure_p)
{
setup_insn_reg_uses (deps, insn);
- setup_insn_reg_pressure_info (insn);
+ init_insn_reg_pressure_info (insn);
}
/* Add register dependencies for insn. */
REG_DEP_ANTI);
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! JUMP_P (XEXP (u, 0))
- || !sel_sched_p ())
+ if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, true, true);
else
- deps->last_pending_memory_flush
- = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ {
+ deps->last_pending_memory_flush
+ = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    /* Signal to sched_analyze_insn that this jump stands
	       just for itself, not for any other pending memory
	       reads/writes that flush_pending_lists had to flush. */
+ PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
+ NON_FLUSH_JUMP_KIND);
+ }
}
sched_analyze_insn (deps, PATTERN (insn), insn);
hard registers correct. */
if (! reload_completed && !LABEL_P (head))
{
- rtx insn = prev_nonnote_insn (head);
+ rtx insn = prev_nonnote_nondebug_insn (head);
- while (insn && DEBUG_INSN_P (insn))
- insn = prev_nonnote_insn (insn);
if (insn && CALL_P (insn))
deps->in_post_call_group_p = post_call_initial;
}
/* As we initialize reg_last lazily, it is possible that we didn't allocate
it at all. */
- if (deps->reg_last)
- free (deps->reg_last);
+ free (deps->reg_last);
deps->reg_last = NULL;
deps = NULL;